I changed the code to this:
from torchvision.transforms import ColorJitter
from transformers import SegformerFeatureExtractor

feature_extractor = SegformerFeatureExtractor()
jitter = ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.1)

def train_transforms(example_batch):
    # Color jitter is applied to the training images only
    images = [jitter(x) for x in example_batch['pixel_values']]
    # Masks are converted to single-channel ("L") before being passed as segmentation maps
    labels = [x.convert("L") for x in example_batch['label']]
    inputs = feature_extractor(images, labels)
    return inputs

def val_transforms(example_batch):
    # No augmentation for validation images
    images = [x for x in example_batch['pixel_values']]
    labels = [x.convert("L") for x in example_batch['label']]
    inputs = feature_extractor(images, labels)
    return inputs

# Set transforms
train_ds.set_transform(train_transforms)
test_ds.set_transform(val_transforms)
Hopefully that makes sense.
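For reference, here is a quick sanity check of what these transforms actually emit (just a sketch; it pulls one example through set_transform and prints the label ids that reach the model):

import numpy as np

# set_transform applies train_transforms lazily on access,
# so indexing returns an already-processed example
sample = train_ds[0]
label_ids = np.unique(np.array(sample["labels"]))
print("label ids in first mask:", label_ids)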
After this change, the error becomes "Target 11 is out of bounds":
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
Cell In[125], line 1
----> 1 trainer.train()
File c:\Users\Lenovo\miniconda3\envs\pretrain-huggingface\Lib\site-packages\transformers\trainer.py:2155, in Trainer.train(self, resume_from_checkpoint, trial, ignore_keys_for_eval, **kwargs)
2152 try:
2153 # Disable progress bars when uploading models during checkpoints to avoid polluting stdout
2154 hf_hub_utils.disable_progress_bars()
-> 2155 return inner_training_loop(
2156 args=args,
2157 resume_from_checkpoint=resume_from_checkpoint,
2158 trial=trial,
2159 ignore_keys_for_eval=ignore_keys_for_eval,
2160 )
2161 finally:
2162 hf_hub_utils.enable_progress_bars()
File c:\Users\Lenovo\miniconda3\envs\pretrain-huggingface\Lib\site-packages\transformers\trainer.py:2522, in Trainer._inner_training_loop(self, batch_size, args, resume_from_checkpoint, trial, ignore_keys_for_eval)
2516 context = (
2517 functools.partial(self.accelerator.no_sync, model=model)
2518 if i != len(batch_samples) - 1
2519 else contextlib.nullcontext
2520 )
2521 with context():
-> 2522 tr_loss_step = self.training_step(model, inputs, num_items_in_batch)
2524 if (
2525 args.logging_nan_inf_filter
2526 and not is_torch_xla_available()
2527 and (torch.isnan(tr_loss_step) or torch.isinf(tr_loss_step))
2528 ):
2529 # if loss is nan or inf simply add the average of previous logged losses
2530 tr_loss = tr_loss + tr_loss / (1 + self.state.global_step - self._globalstep_last_logged)
File c:\Users\Lenovo\miniconda3\envs\pretrain-huggingface\Lib\site-packages\transformers\trainer.py:3655, in Trainer.training_step(self, model, inputs, num_items_in_batch)
3653 loss = self.compute_loss(model, inputs)
3654 else:
-> 3655 loss = self.compute_loss(model, inputs, num_items_in_batch=num_items_in_batch)
3657 del inputs
3658 if (
3659 self.args.torch_empty_cache_steps is not None
3660 and self.state.global_step % self.args.torch_empty_cache_steps == 0
3661 ):
File c:\Users\Lenovo\miniconda3\envs\pretrain-huggingface\Lib\site-packages\transformers\trainer.py:3709, in Trainer.compute_loss(self, model, inputs, return_outputs, num_items_in_batch)
3707 loss_kwargs["num_items_in_batch"] = num_items_in_batch
3708 inputs = {**inputs, **loss_kwargs}
-> 3709 outputs = model(**inputs)
3710 # Save past state if it exists
3711 # TODO: this needs to be fixed and made cleaner later.
3712 if self.args.past_index >= 0:
File c:\Users\Lenovo\miniconda3\envs\pretrain-huggingface\Lib\site-packages\torch\nn\modules\module.py:1736, in Module._wrapped_call_impl(self, *args, **kwargs)
1734 return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc]
1735 else:
-> 1736 return self._call_impl(*args, **kwargs)
File c:\Users\Lenovo\miniconda3\envs\pretrain-huggingface\Lib\site-packages\torch\nn\modules\module.py:1747, in Module._call_impl(self, *args, **kwargs)
1742 # If we don't have any hooks, we want to skip the rest of the logic in
1743 # this function, and just call forward.
1744 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
1745 or _global_backward_pre_hooks or _global_backward_hooks
1746 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1747 return forward_call(*args, **kwargs)
1749 result = None
1750 called_always_called_hooks = set()
File c:\Users\Lenovo\miniconda3\envs\pretrain-huggingface\Lib\site-packages\transformers\models\segformer\modeling_segformer.py:809, in SegformerForSemanticSegmentation.forward(self, pixel_values, labels, output_attentions, output_hidden_states, return_dict)
807 if self.config.num_labels > 1:
808 loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index)
--> 809 loss = loss_fct(upsampled_logits, labels)
810 elif self.config.num_labels == 1:
811 valid_mask = ((labels >= 0) & (labels != self.config.semantic_loss_ignore_index)).float()
File c:\Users\Lenovo\miniconda3\envs\pretrain-huggingface\Lib\site-packages\torch\nn\modules\module.py:1736, in Module._wrapped_call_impl(self, *args, **kwargs)
1734 return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc]
1735 else:
-> 1736 return self._call_impl(*args, **kwargs)
File c:\Users\Lenovo\miniconda3\envs\pretrain-huggingface\Lib\site-packages\torch\nn\modules\module.py:1747, in Module._call_impl(self, *args, **kwargs)
1742 # If we don't have any hooks, we want to skip the rest of the logic in
1743 # this function, and just call forward.
1744 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
1745 or _global_backward_pre_hooks or _global_backward_hooks
1746 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1747 return forward_call(*args, **kwargs)
1749 result = None
1750 called_always_called_hooks = set()
File c:\Users\Lenovo\miniconda3\envs\pretrain-huggingface\Lib\site-packages\torch\nn\modules\loss.py:1293, in CrossEntropyLoss.forward(self, input, target)
1292 def forward(self, input: Tensor, target: Tensor) -> Tensor:
-> 1293 return F.cross_entropy(
1294 input,
1295 target,
1296 weight=self.weight,
1297 ignore_index=self.ignore_index,
1298 reduction=self.reduction,
1299 label_smoothing=self.label_smoothing,
1300 )
File c:\Users\Lenovo\miniconda3\envs\pretrain-huggingface\Lib\site-packages\torch\nn\functional.py:3479, in cross_entropy(input, target, weight, size_average, ignore_index, reduce, reduction, label_smoothing)
3477 if size_average is not None or reduce is not None:
3478 reduction = _Reduction.legacy_get_string(size_average, reduce)
-> 3479 return torch._C._nn.cross_entropy_loss(
3480 input,
3481 target,
3482 weight,
3483 _Reduction.get_enum(reduction),
3484 ignore_index,
3485 label_smoothing,
3486 )
IndexError: Target 11 is out of bounds.
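My understanding is that this error means the cross-entropy loss received a target class id of 11 while the model's head only outputs num_labels logits, i.e. the model was loaded with num_labels of 11 or less. If the masks really contain ids 0 through 11, the model would need at least 12 labels. A minimal sketch of loading it that way, if that is indeed the cause (the checkpoint name and label names below are placeholders, not my actual ones):

from transformers import SegformerForSemanticSegmentation

# Placeholder label map: masks with ids 0..11 need at least 12 classes
id2label = {i: f"class_{i}" for i in range(12)}
label2id = {v: k for k, v in id2label.items()}

model = SegformerForSemanticSegmentation.from_pretrained(
    "nvidia/mit-b0",            # placeholder checkpoint
    num_labels=len(id2label),   # must be greater than the largest id in the masks
    id2label=id2label,
    label2id=label2id,
)

Is that the right way to think about it, or is something in my transforms producing the stray label values?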