Fastai v2 vision

Did you check the order of your transforms? Pretty sure Resize has an order of 10, and in the code you show HeatMapScaler had an order of 1.
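For context, order is a class attribute on fastai2 transforms and lower values run first, so a custom transform only sees the resized image if its order is higher than Resize's. A minimal sketch (the encodes body here is invented for illustration, since the original HeatMapScaler wasn't shown in full):

from fastai2.vision.all import *

class HeatMapScaler(Transform):
    order = 11  # assumption: anything above Resize's order, so it runs on the resized image
    def encodes(self, x: TensorImage): return x / 255.  # placeholder body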


Ahhhh, that would do it! Thank you, that makes more sense. :slight_smile: Will check later, but that is definitely one thing I missed!

Hi @sgugger, but is there a way to specify it?

Well, even if I follow the MNIST example and just re-run it on my own instance, this is what I get on the last line, learn.fit_one_cycle(epochs, lr):

---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
<ipython-input-12-7c21d2a641cb> in <module>
----> 1 learn.fit_one_cycle(epochs, lr)

~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/callback/schedule.py in fit_one_cycle(self, n_epoch, lr_max, div, div_final, pct_start, wd, moms, cbs, reset_opt)
     88     scheds = {'lr': combined_cos(pct_start, lr_max/div, lr_max, lr_max/div_final),
     89               'mom': combined_cos(pct_start, *(self.moms if moms is None else moms))}
---> 90     self.fit(n_epoch, cbs=ParamScheduler(scheds)+L(cbs), reset_opt=reset_opt, wd=wd)
     91 
     92 # Cell

~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/learner.py in fit(self, n_epoch, lr, wd, cbs, reset_opt)
    287                     try:
    288                         self.epoch=epoch;          self('begin_epoch')
--> 289                         self._do_epoch_train()
    290                         self._do_epoch_validate()
    291                     except CancelEpochException:   self('after_cancel_epoch')

~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/learner.py in _do_epoch_train(self)
    262         try:
    263             self.dl = self.dbunch.train_dl;                  self('begin_train')
--> 264             self.all_batches()
    265         except CancelTrainException:                         self('after_cancel_train')
    266         finally:                                             self('after_train')

~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/learner.py in all_batches(self)
    240     def all_batches(self):
    241         self.n_iter = len(self.dl)
--> 242         for o in enumerate(self.dl): self.one_batch(*o)
    243 
    244     def one_batch(self, i, b):

~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/data/load.py in __iter__(self)
     95         self.randomize()
     96         self.before_iter()
---> 97         for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
     98             if self.device is not None: b = to_device(b, self.device)
     99             yield self.after_batch(b)

~/daisy-gan/venv/lib/python3.6/site-packages/torch/utils/data/dataloader.py in __init__(self, loader)
    671 
    672     def __init__(self, loader):
--> 673         super(_MultiProcessingDataLoaderIter, self).__init__(loader)
    674 
    675         assert self._num_workers > 0

~/daisy-gan/venv/lib/python3.6/site-packages/torch/utils/data/dataloader.py in __init__(self, loader)
    321         self._dataset = loader.dataset
    322         self._dataset_kind = loader._dataset_kind
--> 323         self._IterableDataset_len_called = loader._IterableDataset_len_called
    324         self._auto_collation = loader._auto_collation
    325         self._drop_last = loader.drop_last

~/daisy-gan/venv/lib/python3.6/site-packages/fastcore/foundation.py in __getattr__(self, k)
    221             attr = getattr(self,self._default,None)
    222             if attr is not None: return getattr(attr, k)
--> 223         raise AttributeError(k)
    224     def __dir__(self): return custom_dir(self, self._dir() if self._xtra is None else self._dir())
    225 #     def __getstate__(self): return self.__dict__

AttributeError: _IterableDataset_len_called

This error means you have PyTorch v1.4 or torchvision 0.5.0. As specified in the deps, you need to downgrade to torch<=1.3 and torchvision<=0.4.2 (they just broke fastai v2 with their new release and we haven't had time to work on a fix yet).
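For reference, the downgrade can be done with something like pip install "torch==1.3.1" "torchvision==0.4.2" (the exact point releases are an assumption; anything in the 1.3.x / 0.4.x series satisfying the constraints should do), and you can confirm what ended up installed with:

import torch, torchvision

# Should print something <= 1.3.x and <= 0.4.2 after the downgrade.
print(torch.__version__, torchvision.__version__)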

Oh yes, that worked (I got a different error on my actual problem after the pytorch upgrade so I thought I was making progress).

So I re-scoped, but then I got a different error on MNIST when I tried, e.g., to use the example like so:

learn = cnn_learner(data, resnet50, loss_func=F.nll_loss)
learn.fit_one_cycle(epochs, lr)
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-15-7c21d2a641cb> in <module>
----> 1 learn.fit_one_cycle(epochs, lr)

~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/callback/schedule.py in fit_one_cycle(self, n_epoch, lr_max, div, div_final, pct_start, wd, moms, cbs, reset_opt)
     88     scheds = {'lr': combined_cos(pct_start, lr_max/div, lr_max, lr_max/div_final),
     89               'mom': combined_cos(pct_start, *(self.moms if moms is None else moms))}
---> 90     self.fit(n_epoch, cbs=ParamScheduler(scheds)+L(cbs), reset_opt=reset_opt, wd=wd)
     91 
     92 # Cell

~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/learner.py in fit(self, n_epoch, lr, wd, cbs, reset_opt)
    287                     try:
    288                         self.epoch=epoch;          self('begin_epoch')
--> 289                         self._do_epoch_train()
    290                         self._do_epoch_validate()
    291                     except CancelEpochException:   self('after_cancel_epoch')

~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/learner.py in _do_epoch_train(self)
    262         try:
    263             self.dl = self.dbunch.train_dl;                  self('begin_train')
--> 264             self.all_batches()
    265         except CancelTrainException:                         self('after_cancel_train')
    266         finally:                                             self('after_train')

~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/learner.py in all_batches(self)
    240     def all_batches(self):
    241         self.n_iter = len(self.dl)
--> 242         for o in enumerate(self.dl): self.one_batch(*o)
    243 
    244     def one_batch(self, i, b):

~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/learner.py in one_batch(self, i, b)
    246         try:
    247             self._split(b);                                  self('begin_batch')
--> 248             self.pred = self.model(*self.xb);                self('after_pred')
    249             if len(self.yb) == 0: return
    250             self.loss = self.loss_func(self.pred, *self.yb); self('after_loss')

~/daisy-gan/venv/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    539             result = self._slow_forward(*input, **kwargs)
    540         else:
--> 541             result = self.forward(*input, **kwargs)
    542         for hook in self._forward_hooks.values():
    543             hook_result = hook(self, input, result)

~/daisy-gan/venv/lib/python3.6/site-packages/torch/nn/modules/container.py in forward(self, input)
     90     def forward(self, input):
     91         for module in self._modules.values():
---> 92             input = module(input)
     93         return input
     94 

~/daisy-gan/venv/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    539             result = self._slow_forward(*input, **kwargs)
    540         else:
--> 541             result = self.forward(*input, **kwargs)
    542         for hook in self._forward_hooks.values():
    543             hook_result = hook(self, input, result)

~/daisy-gan/venv/lib/python3.6/site-packages/torch/nn/modules/container.py in forward(self, input)
     90     def forward(self, input):
     91         for module in self._modules.values():
---> 92             input = module(input)
     93         return input
     94 

~/daisy-gan/venv/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    539             result = self._slow_forward(*input, **kwargs)
    540         else:
--> 541             result = self.forward(*input, **kwargs)
    542         for hook in self._forward_hooks.values():
    543             hook_result = hook(self, input, result)

~/daisy-gan/venv/lib/python3.6/site-packages/torch/nn/modules/conv.py in forward(self, input)
    343 
    344     def forward(self, input):
--> 345         return self.conv2d_forward(input, self.weight)
    346 
    347 class Conv3d(_ConvNd):

~/daisy-gan/venv/lib/python3.6/site-packages/torch/nn/modules/conv.py in conv2d_forward(self, input, weight)
    340                             _pair(0), self.dilation, self.groups)
    341         return F.conv2d(input, weight, self.bias, self.stride,
--> 342                         self.padding, self.dilation, self.groups)
    343 
    344     def forward(self, input):

RuntimeError: Given groups=1, weight of size 64 3 7 7, expected input[256, 1, 28, 28] to have 3 channels, but got 1 channels instead

So maybe these two worlds do not yet mesh well?

You need to have three channels to use a pretrained model that expects three-channel input; this has nothing to do with fastai. An error you will hit afterward is that the cnn_learner function expects a data object built with fastai to infer the number of classes (you can set that by doing data.c = 10 if you really want to use cnn_learner; this will be documented when we are at the release stage of fastai v2).

When we say you can use the training loop with PyTorch-built databunches, we only guarantee the training loop, not the rest. So you should use your own model and use the init of Learner like in this example.
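A minimal sketch of that approach, assuming data is the MNIST DataLoaders from the earlier post (the small convnet here is made up for illustration, sized for 1-channel 28x28 input):

from fastai2.vision.all import *

# A tiny model that accepts 1-channel input, so it matches MNIST directly.
model = nn.Sequential(
    nn.Conv2d(1, 16, 3, stride=2, padding=1), nn.BatchNorm2d(16), nn.ReLU(),
    nn.Conv2d(16, 32, 3, stride=2, padding=1), nn.BatchNorm2d(32), nn.ReLU(),
    nn.AdaptiveAvgPool2d(1), Flatten(),
    nn.Linear(32, 10), nn.LogSoftmax(dim=-1))

# Plain Learner init: only the training loop is relied on, nothing is inferred from the data.
learn = Learner(data, model, loss_func=F.nll_loss, metrics=accuracy)
learn.fit_one_cycle(1, 1e-2)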

@sgugger there is an issue when trying to do a show_batch on the PETs notebook. It seems that when it is called, the item transforms are not applied first as they should be.

batch_tfms = [*aug_transforms(size=224, max_warp=0), Normalize.from_stats(*imagenet_stats)]
item_tfms = RandomResizedCrop(460, min_scale=0.75, ratio=(1.,1.))
bs=64
data = ImageDataLoaders.from_name_re(path, fnames, pat, batch_tfms=batch_tfms, 
                                   item_tfms=item_tfms, bs=bs)
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-8-08d0dd5c9f7c> in <module>()
----> 1 data.show_batch()

/usr/local/lib/python3.6/dist-packages/fastai2/data/core.py in show_batch(self, b, max_n, ctxs, show, **kwargs)
     85 
     86     def show_batch(self, b=None, max_n=9, ctxs=None, show=True, **kwargs):
---> 87         if b is None: b = self.one_batch()
     88         if not show: return self._pre_show_batch(b, max_n=max_n)
     89         show_batch(*self._pre_show_batch(b, max_n=max_n), ctxs=ctxs, max_n=max_n, **kwargs)

/usr/local/lib/python3.6/dist-packages/fastai2/data/load.py in one_batch(self)
    128     def one_batch(self):
    129         if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
--> 130         with self.fake_l.no_multiproc(): res = first(self)
    131         if hasattr(self, 'it'): delattr(self, 'it')
    132         return res

/usr/local/lib/python3.6/dist-packages/fastcore/utils.py in first(x)
    172 def first(x):
    173     "First element of `x`, or None if missing"
--> 174     try: return next(iter(x))
    175     except StopIteration: return None
    176 

/usr/local/lib/python3.6/dist-packages/fastai2/data/load.py in __iter__(self)
     95         self.randomize()
     96         self.before_iter()
---> 97         for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
     98             if self.device is not None: b = to_device(b, self.device)
     99             yield self.after_batch(b)

/usr/local/lib/python3.6/dist-packages/torch/utils/data/dataloader.py in __next__(self)
    344     def __next__(self):
    345         index = self._next_index()  # may raise StopIteration
--> 346         data = self._dataset_fetcher.fetch(index)  # may raise StopIteration
    347         if self._pin_memory:
    348             data = _utils.pin_memory.pin_memory(data)

/usr/local/lib/python3.6/dist-packages/torch/utils/data/_utils/fetch.py in fetch(self, possibly_batched_index)
     32                 raise StopIteration
     33         else:
---> 34             data = next(self.dataset_iter)
     35         return self.collate_fn(data)
     36 

/usr/local/lib/python3.6/dist-packages/fastai2/data/load.py in create_batches(self, samps)
    104         self.it = iter(self.dataset) if self.dataset is not None else None
    105         res = filter(lambda o:o is not None, map(self.do_item, samps))
--> 106         yield from map(self.do_batch, self.chunkify(res))
    107 
    108     def new(self, dataset=None, cls=None, **kwargs):

/usr/local/lib/python3.6/dist-packages/fastai2/data/load.py in do_batch(self, b)
    125     def create_item(self, s):  return next(self.it) if s is None else self.dataset[s]
    126     def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
--> 127     def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
    128     def one_batch(self):
    129         if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')

/usr/local/lib/python3.6/dist-packages/fastai2/data/load.py in create_batch(self, b)
    124     def retain(self, res, b):  return retain_types(res, b[0] if is_listy(b) else b)
    125     def create_item(self, s):  return next(self.it) if s is None else self.dataset[s]
--> 126     def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
    127     def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
    128     def one_batch(self):

/usr/local/lib/python3.6/dist-packages/fastai2/data/load.py in fa_collate(t)
     44     b = t[0]
     45     return (default_collate(t) if isinstance(b, _collate_types)
---> 46             else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
     47             else default_collate(t))
     48 

/usr/local/lib/python3.6/dist-packages/fastai2/data/load.py in <listcomp>(.0)
     44     b = t[0]
     45     return (default_collate(t) if isinstance(b, _collate_types)
---> 46             else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
     47             else default_collate(t))
     48 

/usr/local/lib/python3.6/dist-packages/fastai2/data/load.py in fa_collate(t)
     43 def fa_collate(t):
     44     b = t[0]
---> 45     return (default_collate(t) if isinstance(b, _collate_types)
     46             else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
     47             else default_collate(t))

/usr/local/lib/python3.6/dist-packages/torch/utils/data/_utils/collate.py in default_collate(batch)
     53             storage = elem.storage()._new_shared(numel)
     54             out = elem.new(storage)
---> 55         return torch.stack(batch, 0, out=out)
     56     elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
     57             and elem_type.__name__ != 'string_':

RuntimeError: invalid argument 0: Sizes of tensors must match except in dimension 0. Got 333 and 225 in dimension 2 at /pytorch/aten/src/TH/generic/THTensor.cpp:689

This is running the dev version, as the most recent pip release doesn't include ImageDataLoaders.

Edit: it also does not work when calling one_batch.

You mean someone forgot to adapt the factory methods to the last version of DataBlock? Nah, that's impossible. :wink:
Should be fixed now.


Awesome! That did the trick :wink: Also saw inklings of something interesting… a DataBlock.summary? :wink:

DataBlock.summary is not ready yet, but hopefully it will be great for debugging/seeing what happens behind the scenes.


Can't wait! :slight_smile:

Hi, I was just trying the new release version 0.0.6 of fastai over the last couple of days and I noticed this behavior. It's not actually a problem for me (I am stuck on other issues), but I thought I'd point it out in case it is helpful:

dir(DataLoaders) has device listed twice and dir(DataLoader) has device and dataset listed twice.
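A quick way to list any duplicated names, just using the standard library:

from collections import Counter
from fastai2.data.load import DataLoader
from fastai2.data.core import DataLoaders

# Print the names that appear more than once in dir() for each class.
for cls in (DataLoader, DataLoaders):
    dupes = [name for name, n in Counter(dir(cls)).items() if n > 1]
    print(cls.__name__, dupes)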

I am investigating this because my cnn_learner errors out with AttributeError: 'DataLoader' object has no attribute 'after_item', but that happens here: if getattr(dls.train_dl.after_item, 'c', False): return dls.train_dl.after_item.c, so the two may only be loosely related.

Will add more safeguards here, thanks for reporting. It's going to error anyway because it can't find the number of output channels in your data, but with a useful error message (pass it with n_out=...).
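For reference, the explicit form looks something like this (a sketch; dls, the architecture and the loss function are placeholders):

from fastai2.vision.all import *

# Pass the number of outputs explicitly when it can't be inferred from the data.
learn = cnn_learner(dls, resnet34, n_out=10, loss_func=CrossEntropyLossFlat())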

I'm using a U-Net with a different number of input channels (the current model only accepts 3).

Here is my change:

  • added an arg new_arch_in_channels to unet_learner (a better name could be found)
  • added a function change_body_in_channels and called it in unet_learner
  • the rest is the same

Let me know if you are interested in a PR (or whether you'd require any changes first)?

Here is the change in vision/learner.py:

def change_body_in_channels(body, in_channels, pretrained):
    "Change first layer to match `in_channels`"
    assert not(pretrained), 'Change of input channels does not support pretrained models'
    assert body[0].__class__.__name__ == 'Conv2d', f'Change of input channels only supported with Conv2d, found {body[0].__class__.__name__}'
    prev_layer = body[0]
    # get init parameters
    params = {attr:getattr(prev_layer, attr) for attr in 'out_channels,kernel_size,stride,padding,dilation,groups,padding_mode'.split(',')}
    params['bias'] = getattr(prev_layer, 'bias') is not None
    # set number of input channels
    params['in_channels'] = in_channels
    body[0] = nn.Conv2d(**params)
    return body

@delegates(Learner.__init__)
def unet_learner(dls, arch, new_arch_in_channels=None, loss_func=None, pretrained=True, cut=None, splitter=None, config=None, **kwargs):
    "Build a unet learner from `dls` and `arch`"
    if config is None: config = unet_config()
    meta = model_meta.get(arch, _default_meta)
    body = create_body(arch, pretrained, ifnone(cut, meta['cut']))
    if new_arch_in_channels:
        change_body_in_channels(body, new_arch_in_channels, pretrained)
    size = dls.one_batch()[0].shape[-2:]
    model = models.unet.DynamicUnet(body, get_c(dls), size, **config)
    learn = Learner(dls, model, loss_func=loss_func, splitter=ifnone(splitter, meta['split']), **kwargs)
    if pretrained: learn.freeze()
    return learn
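
For reference, usage would look something like this (a sketch; dls stands for a segmentation DataLoaders with 4-channel input, and pretrained must be False because of the assert above):

from fastai2.vision.all import *

learn = unet_learner(dls, resnet34, new_arch_in_channels=4, pretrained=False,
                     loss_func=CrossEntropyLossFlat(axis=1))
learn.fit_one_cycle(1, 1e-3)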

This could be useful for cnn_learner as well. I'd make the first function private (it can also have a shorter name :wink: ) and use ch_in for the argument instead of new_arch_in_channels.
The only problem is that this new conv will be frozen if you use pretrained=True, so we should not freeze the Learner in that case.

Sounds good. Yes I have some assert statements at the start of the function.

I'll implement the changes and do some tests, probably taking only one channel from MNIST.

@boris I suggest you use the param n_in for the number of input channels. I used to have some code to do this for pretrained models, but now embarrassingly I can't find it. Perhaps you can add this logic to your code - I've found it works well in practice (a rough sketch follows after the list):

  • For n_in==1: take the sum of the pretrained weights to create the unit axis (e.g. going from color->b&w this works great)
  • For n_in>3: add all-zero slices for the additional channels, and leave the existing weights as-is (e.g. adding an alpha channel to an RGB pretrained model; fine-tuning will only change the weights much from zero if it's actually useful)
  • For n_in==2: delete the 3rd channel and increase the weights of the other channels by 50%
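
A rough sketch of those heuristics, assuming the first layer is a plain nn.Conv2d pretrained on 3-channel RGB input (this is an illustration, not the fastai implementation):

import torch
import torch.nn as nn

def adapt_first_conv(conv, n_in):
    "Return a new Conv2d with `n_in` input channels, reusing `conv`'s pretrained weights."
    w = conv.weight.data  # shape: (out_channels, 3, kh, kw)
    new_conv = nn.Conv2d(n_in, conv.out_channels, conv.kernel_size, conv.stride,
                         conv.padding, conv.dilation, conv.groups,
                         bias=conv.bias is not None)
    with torch.no_grad():
        if n_in == 1:    # color -> b&w: sum the RGB filters into a single channel
            new_conv.weight.copy_(w.sum(dim=1, keepdim=True))
        elif n_in == 2:  # drop the 3rd channel, scale the remaining two by 50%
            new_conv.weight.copy_(w[:, :2] * 1.5)
        elif n_in > 3:   # keep the RGB weights, zero-init the extra channels
            new_conv.weight.zero_()
            new_conv.weight[:, :3].copy_(w)
        else:            # n_in == 3: plain copy
            new_conv.weight.copy_(w)
        if conv.bias is not None: new_conv.bias.copy_(conv.bias)
    return new_conv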

Thanks, at the moment I was not loading any pretrained weights, but your suggestion will make it more useful. I'll do some tests and propose a change.


@sgugger just to make you aware of a bug someone showed me: the resize bug seems to be present at the DataBlock level too (same transforms as before):

pets = DataBlock(blocks=(ImageBlock, CategoryBlock),
                 get_items=get_image_files,
                 splitter=RandomSplitter(),
                 get_y=RegexLabeller(pat = r'/([^/]+)_\d+.*'))
dbunch = pets.dataloaders(path_im, item_tfms=item_tfms, batch_tfms=batch_tfms, bs=bs)
dbunch.show_batch(max_n=9, figsize=(6,7))

RuntimeError: invalid argument 0: Sizes of tensors must match except in dimension 0. Got 456 and 375 in dimension 2 at /pytorch/aten/src/TH/generic/THTensor.cpp:689

(Full trace if you need it):

---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-12-5a9f6693bc49> in <module>()
----> 1 dbunch.show_batch(max_n=9, figsize=(6,7))

/usr/local/lib/python3.6/dist-packages/fastai2/data/core.py in show_batch(self, b, max_n, ctxs, show, **kwargs)
     87 
     88     def show_batch(self, b=None, max_n=9, ctxs=None, show=True, **kwargs):
---> 89         if b is None: b = self.one_batch()
     90         if not show: return self._pre_show_batch(b, max_n=max_n)
     91         show_batch(*self._pre_show_batch(b, max_n=max_n), ctxs=ctxs, max_n=max_n, **kwargs)

/usr/local/lib/python3.6/dist-packages/fastai2/data/load.py in one_batch(self)
    128     def one_batch(self):
    129         if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
--> 130         with self.fake_l.no_multiproc(): res = first(self)
    131         if hasattr(self, 'it'): delattr(self, 'it')
    132         return res

/usr/local/lib/python3.6/dist-packages/fastcore/utils.py in first(x)
    172 def first(x):
    173     "First element of `x`, or None if missing"
--> 174     try: return next(iter(x))
    175     except StopIteration: return None
    176 

/usr/local/lib/python3.6/dist-packages/fastai2/data/load.py in __iter__(self)
     95         self.randomize()
     96         self.before_iter()
---> 97         for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
     98             if self.device is not None: b = to_device(b, self.device)
     99             yield self.after_batch(b)

/usr/local/lib/python3.6/dist-packages/torch/utils/data/dataloader.py in __next__(self)
    344     def __next__(self):
    345         index = self._next_index()  # may raise StopIteration
--> 346         data = self._dataset_fetcher.fetch(index)  # may raise StopIteration
    347         if self._pin_memory:
    348             data = _utils.pin_memory.pin_memory(data)

/usr/local/lib/python3.6/dist-packages/torch/utils/data/_utils/fetch.py in fetch(self, possibly_batched_index)
     32                 raise StopIteration
     33         else:
---> 34             data = next(self.dataset_iter)
     35         return self.collate_fn(data)
     36 

/usr/local/lib/python3.6/dist-packages/fastai2/data/load.py in create_batches(self, samps)
    104         self.it = iter(self.dataset) if self.dataset is not None else None
    105         res = filter(lambda o:o is not None, map(self.do_item, samps))
--> 106         yield from map(self.do_batch, self.chunkify(res))
    107 
    108     def new(self, dataset=None, cls=None, **kwargs):

/usr/local/lib/python3.6/dist-packages/fastai2/data/load.py in do_batch(self, b)
    125     def create_item(self, s):  return next(self.it) if s is None else self.dataset[s]
    126     def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
--> 127     def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
    128     def one_batch(self):
    129         if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')

/usr/local/lib/python3.6/dist-packages/fastai2/data/load.py in create_batch(self, b)
    124     def retain(self, res, b):  return retain_types(res, b[0] if is_listy(b) else b)
    125     def create_item(self, s):  return next(self.it) if s is None else self.dataset[s]
--> 126     def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
    127     def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
    128     def one_batch(self):

/usr/local/lib/python3.6/dist-packages/fastai2/data/load.py in fa_collate(t)
     44     b = t[0]
     45     return (default_collate(t) if isinstance(b, _collate_types)
---> 46             else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
     47             else default_collate(t))
     48 

/usr/local/lib/python3.6/dist-packages/fastai2/data/load.py in <listcomp>(.0)
     44     b = t[0]
     45     return (default_collate(t) if isinstance(b, _collate_types)
---> 46             else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
     47             else default_collate(t))
     48 

/usr/local/lib/python3.6/dist-packages/fastai2/data/load.py in fa_collate(t)
     43 def fa_collate(t):
     44     b = t[0]
---> 45     return (default_collate(t) if isinstance(b, _collate_types)
     46             else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
     47             else default_collate(t))

/usr/local/lib/python3.6/dist-packages/torch/utils/data/_utils/collate.py in default_collate(batch)
     53             storage = elem.storage()._new_shared(numel)
     54             out = elem.new(storage)
---> 55         return torch.stack(batch, 0, out=out)
     56     elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
     57             and elem_type.__name__ != 'string_':

RuntimeError: invalid argument 0: Sizes of tensors must match except in dimension 0. Got 456 and 375 in dimension 2 at /pytorch/aten/src/TH/generic/THTensor.cpp:689

It is also present in the most recent pip release.

Yes, you pass the transforms to the DataBlock now, not to your call to dataloaders.
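For example, something like this (a sketch reusing the names from the post above):

pets = DataBlock(blocks=(ImageBlock, CategoryBlock),
                 get_items=get_image_files,
                 splitter=RandomSplitter(),
                 get_y=RegexLabeller(pat=r'/([^/]+)_\d+.*'),
                 item_tfms=RandomResizedCrop(460, min_scale=0.75, ratio=(1., 1.)),
                 batch_tfms=[*aug_transforms(size=224, max_warp=0),
                             Normalize.from_stats(*imagenet_stats)])
dbunch = pets.dataloaders(path_im, bs=64)
dbunch.show_batch(max_n=9, figsize=(6, 7))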
