Unhashable type: 'slice'

Hi, I have a PyTorch model that I’d like to use with fastai; I’m following this tutorial:

my implementation looks like this:

import importlib
import fastai
import torch
from fastai.optimizer import OptimWrapper
from fastai.data.core import DataLoaders
from fastai.learner import Learner
from fastai.callback.progress import ProgressCallback


def opt_func(params, **kwargs):
    """Wrap a plain torch.optim.AdamW so fastai's Learner can drive it."""
    return OptimWrapper(torch.optim.AdamW(params, lr=0.001))


# Bundle the existing PyTorch DataLoaders for fastai.
dls = DataLoaders(training_loader, testing_loader)
# BUG FIX: the original passed opt_func=SGD, but SGD is never defined/imported here;
# use the OptimWrapper-based opt_func defined above instead.
learn = Learner(dls, model, loss_func=loss_fn, opt_func=opt_func)
learn.fit(2)

but I get this error:

---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
Input In [28], in <cell line: 1>()
----> 1 learn.fit(2)

File ~/venvs/twipper_v3/lib64/python3.8/site-packages/fastai/learner.py:222, in Learner.fit(self, n_epoch, lr, wd, cbs, reset_opt)
    220 self.opt.set_hypers(lr=self.lr if lr is None else lr)
    221 self.n_epoch = n_epoch
--> 222 self._with_events(self._do_fit, 'fit', CancelFitException, self._end_cleanup)

File ~/venvs/twipper_v3/lib64/python3.8/site-packages/fastai/learner.py:164, in Learner._with_events(self, f, event_type, ex, final)
    163 def _with_events(self, f, event_type, ex, final=noop):
--> 164     try: self(f'before_{event_type}');  f()
    165     except ex: self(f'after_cancel_{event_type}')
    166     self(f'after_{event_type}');  final()

File ~/venvs/twipper_v3/lib64/python3.8/site-packages/fastai/learner.py:213, in Learner._do_fit(self)
    211 for epoch in range(self.n_epoch):
    212     self.epoch=epoch
--> 213     self._with_events(self._do_epoch, 'epoch', CancelEpochException)

File ~/venvs/twipper_v3/lib64/python3.8/site-packages/fastai/learner.py:164, in Learner._with_events(self, f, event_type, ex, final)
    163 def _with_events(self, f, event_type, ex, final=noop):
--> 164     try: self(f'before_{event_type}');  f()
    165     except ex: self(f'after_cancel_{event_type}')
    166     self(f'after_{event_type}');  final()

File ~/venvs/twipper_v3/lib64/python3.8/site-packages/fastai/learner.py:207, in Learner._do_epoch(self)
    206 def _do_epoch(self):
--> 207     self._do_epoch_train()
    208     self._do_epoch_validate()

File ~/venvs/twipper_v3/lib64/python3.8/site-packages/fastai/learner.py:199, in Learner._do_epoch_train(self)
    197 def _do_epoch_train(self):
    198     self.dl = self.dls.train
--> 199     self._with_events(self.all_batches, 'train', CancelTrainException)

File ~/venvs/twipper_v3/lib64/python3.8/site-packages/fastai/learner.py:164, in Learner._with_events(self, f, event_type, ex, final)
    163 def _with_events(self, f, event_type, ex, final=noop):
--> 164     try: self(f'before_{event_type}');  f()
    165     except ex: self(f'after_cancel_{event_type}')
    166     self(f'after_{event_type}');  final()

File ~/venvs/twipper_v3/lib64/python3.8/site-packages/fastai/learner.py:170, in Learner.all_batches(self)
    168 def all_batches(self):
    169     self.n_iter = len(self.dl)
--> 170     for o in enumerate(self.dl): self.one_batch(*o)

File ~/venvs/twipper_v3/lib64/python3.8/site-packages/fastai/learner.py:194, in Learner.one_batch(self, i, b)
    192 self.iter = i
    193 b = self._set_device(b)
--> 194 self._split(b)
    195 self._with_events(self._do_one_batch, 'batch', CancelBatchException)

File ~/venvs/twipper_v3/lib64/python3.8/site-packages/fastai/learner.py:161, in Learner._split(self, b)
    159 def _split(self, b):
    160     i = getattr(self.dls, 'n_inp', 1 if len(b)==1 else len(b)-1)
--> 161     self.xb,self.yb = b[:i],b[i:]

TypeError: unhashable type: 'slice'

the model works in pytorch so I’m not sure what I could have done wrong

I suppose your dataloader doesn’t return a list; that’s why fastai can’t use [:i]. Maybe it returns a dictionary?

So, I use the basic PyTorch dataloader, which should not be the issue; however, I have a custom dataset class which does return a dict:

class CustomDataset(Dataset):
    """Tokenize dataframe rows on the fly for text classification.

    Each item is a dict with the encoded inputs ('ids', 'mask',
    'token_type_ids') and the one-hot target ('targets').
    NOTE(review): assumes `dataframe` has an `extrait` text column and a
    'oneHot' target column — confirm against the caller.
    """

    def __init__(self, dataframe, tokenizer, max_len):
        self.tokenizer = tokenizer
        self.data = dataframe
        self.extrait = dataframe.extrait
        self.targets = self.data['oneHot']
        self.max_len = max_len

    def __len__(self):
        # One item per row of the text column.
        return len(self.extrait)

    def __getitem__(self, index):
        # Collapse any run of whitespace to a single space.
        text = " ".join(str(self.extrait[index]).split())

        encoded = self.tokenizer.encode_plus(
            text,
            None,
            add_special_tokens=True,
            max_length=self.max_len,
            pad_to_max_length=True,
            return_token_type_ids=True,
        )

        return {
            'ids': torch.tensor(encoded['input_ids'], dtype=torch.long),
            'mask': torch.tensor(encoded['attention_mask'], dtype=torch.long),
            'token_type_ids': torch.tensor(encoded["token_type_ids"], dtype=torch.long),
            'targets': torch.tensor(self.targets[index], dtype=torch.float),
        }
# Build the train/validation datasets and their DataLoaders.

# 80/20 split, with a fixed seed so the split is reproducible.
train_size = 0.8
train_dataset = new_df.sample(frac=train_size, random_state=200)
test_dataset = new_df.drop(train_dataset.index).reset_index(drop=True)
train_dataset = train_dataset.reset_index(drop=True)

training_set = CustomDataset(train_dataset, tokenizer, MAX_LEN)
testing_set = CustomDataset(test_dataset, tokenizer, MAX_LEN)

train_params = {'batch_size': TRAIN_BATCH_SIZE, 'shuffle': True, 'num_workers': 0}
test_params = {'batch_size': VALID_BATCH_SIZE, 'shuffle': True, 'num_workers': 0}

training_loader = DataLoader(training_set, **train_params)
testing_loader = DataLoader(testing_set, **test_params)

In that case, what do you think the output of dataset.__getitem__ should be? A tuple like (inputs, targets) rather than a dict — e.g. ending with torch.tensor(self.targets[index], dtype=torch.float) as the last element, perhaps?