Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!

Hi, I’m creating a multi-point image regression model using a GPU instance in Google Colab. I am running into an error when running dblock.summary.
Any help would be appreciated!

import fastbook
fastbook.setup_book()

from fastbook import *
from fastai.vision.widgets import *
import numpy as np
import pandas as pd
from google.colab import drive
drive.mount('/content/drive')
import csv

path = Path('/content/drive/MyDrive/Colab Notebooks/yugioh/images')
img_files = get_image_files(path)

with open('/content/drive/MyDrive/Colab Notebooks/yugioh/coords.txt', newline='') as csvfile:
  coords=[]
  spamreader = csv.reader(csvfile, delimiter=',')
  for row in spamreader:
    coords.append(row)
  coords = np.transpose(coords)
  
def get_coords(fname):
  #get num of card from file name
  cardNum = int(re.findall(r'card \((.+)\).jpg$', fname.name)[0])
  #get array containing coords for specific card
  cardCoords = coords[cardNum]
  #convert coords from array of type string into array of type integer
  cardCoords = list(map(int, cardCoords))
  cardCoords = [[cardCoords[1], cardCoords[2]], [cardCoords[3], cardCoords[4]]]
  return tensor(cardCoords).cuda()

dblock = DataBlock(
    blocks=(ImageBlock, PointBlock),
    get_items=get_image_files,
    get_y=get_coords,
    batch_tfms=aug_transforms(),
    #batch_tfms=Resize(400,300),
)

dblock.summary(path,bs=12, device='cuda')

Setting-up type transforms pipelines
Collecting items from /content/drive/MyDrive/Colab Notebooks/yugioh/images
Found 66 items
2 datasets of sizes 53,13
Setting up Pipeline: PILBase.create
Setting up Pipeline: get_coords -> TensorPoint.create

Building one sample
  Pipeline: PILBase.create
    starting from
      /content/drive/MyDrive/Colab Notebooks/yugioh/images/card (60).jpg
    applying PILBase.create gives
      PILImage mode=RGB size=4032x3024
  Pipeline: get_coords -> TensorPoint.create
    starting from
      /content/drive/MyDrive/Colab Notebooks/yugioh/images/card (60).jpg
    applying get_coords gives
      Tensor of size 2x2
    applying TensorPoint.create gives
      TensorPoint of size 2x2

Final sample: (PILImage mode=RGB size=4032x3024, TensorPoint([[2980.,  672.],
        [3056., 1096.]], device='cuda:0'))


Collecting items from /content/drive/MyDrive/Colab Notebooks/yugioh/images
Found 66 items
2 datasets of sizes 53,13
Setting up Pipeline: PILBase.create
Setting up Pipeline: get_coords -> TensorPoint.create
Setting up after_item: Pipeline: PointScaler -> ToTensor
Setting up before_batch: Pipeline: 
Setting up after_batch: Pipeline: IntToFloatTensor -- {'div': 255.0, 'div_mask': 1} -> Flip -- {'size': None, 'mode': 'bilinear', 'pad_mode': 'reflection', 'mode_mask': 'nearest', 'align_corners': True, 'p': 0.5} -> Brightness -- {'max_lighting': 0.2, 'p': 1.0, 'draw': None, 'batch': False}
Could not do one pass in your dataloader, there is something wrong in it. Please see the stack trace below:

---------------------------------------------------------------------------

RuntimeError                              Traceback (most recent call last)

<ipython-input-11-b5fb000c8a2c> in <module>()
----> 1 dblock.summary(path,bs=12, device='cuda')

/usr/local/lib/python3.7/dist-packages/fastai/data/block.py in summary(self, source, bs, show_batch, **kwargs)
    164     print(f"\nFinal sample: {dsets.train[0]}\n\n")
    165 
--> 166     dls = self.dataloaders(source, bs=bs, verbose=True)
    167     print("\nBuilding one batch")
    168     if len([f for f in dls.train.after_item.fs if f.name != 'noop'])!=0:

/usr/local/lib/python3.7/dist-packages/fastai/data/block.py in dataloaders(self, source, path, verbose, **kwargs)
    113         dsets = self.datasets(source, verbose=verbose)
    114         kwargs = {**self.dls_kwargs, **kwargs, 'verbose': verbose}
--> 115         return dsets.dataloaders(path=path, after_item=self.item_tfms, after_batch=self.batch_tfms, **kwargs)
    116 
    117     _docs = dict(new="Create a new `DataBlock` with other `item_tfms` and `batch_tfms`",

/usr/local/lib/python3.7/dist-packages/fastai/data/core.py in dataloaders(self, bs, shuffle_train, shuffle, val_shuffle, n, path, dl_type, dl_kwargs, device, drop_last, val_bs, **kwargs)
    235         def_kwargs = {'bs':bs if val_bs is None else val_bs,'shuffle':val_shuffle,'n':None,'drop_last':False}
    236         dls = [dl] + [dl.new(self.subset(i), **merge(kwargs,def_kwargs,val_kwargs,dl_kwargs[i]))
--> 237                       for i in range(1, self.n_subsets)]
    238         return self._dbunch_type(*dls, path=path, device=device)
    239 

/usr/local/lib/python3.7/dist-packages/fastai/data/core.py in <listcomp>(.0)
    235         def_kwargs = {'bs':bs if val_bs is None else val_bs,'shuffle':val_shuffle,'n':None,'drop_last':False}
    236         dls = [dl] + [dl.new(self.subset(i), **merge(kwargs,def_kwargs,val_kwargs,dl_kwargs[i]))
--> 237                       for i in range(1, self.n_subsets)]
    238         return self._dbunch_type(*dls, path=path, device=device)
    239 

/usr/local/lib/python3.7/dist-packages/fastai/data/core.py in new(self, dataset, cls, **kwargs)
     64         if not hasattr(self, '_n_inp') or not hasattr(self, '_types'):
     65             try:
---> 66                 self._one_pass()
     67                 res._n_inp,res._types = self._n_inp,self._types
     68             except Exception as e:

/usr/local/lib/python3.7/dist-packages/fastai/data/core.py in _one_pass(self)
     49 
     50     def _one_pass(self):
---> 51         b = self.do_batch([self.do_item(None)])
     52         if self.device is not None: b = to_device(b, self.device)
     53         its = self.after_batch(b)

/usr/local/lib/python3.7/dist-packages/fastai/data/load.py in do_item(self, s)
    149     def prebatched(self): return self.bs is None
    150     def do_item(self, s):
--> 151         try: return self.after_item(self.create_item(s))
    152         except SkipItemException: return None
    153     def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)

/usr/local/lib/python3.7/dist-packages/fastcore/transform.py in __call__(self, o)
    198         self.fs = self.fs.sorted(key='order')
    199 
--> 200     def __call__(self, o): return compose_tfms(o, tfms=self.fs, split_idx=self.split_idx)
    201     def __repr__(self): return f"Pipeline: {' -> '.join([f.name for f in self.fs if f.name != 'noop'])}"
    202     def __getitem__(self,i): return self.fs[i]

/usr/local/lib/python3.7/dist-packages/fastcore/transform.py in compose_tfms(x, tfms, is_enc, reverse, **kwargs)
    148     for f in tfms:
    149         if not is_enc: f = f.decode
--> 150         x = f(x, **kwargs)
    151     return x
    152 

/usr/local/lib/python3.7/dist-packages/fastcore/transform.py in __call__(self, x, **kwargs)
     71     @property
     72     def name(self): return getattr(self, '_name', _get_name(self))
---> 73     def __call__(self, x, **kwargs): return self._call('encodes', x, **kwargs)
     74     def decode  (self, x, **kwargs): return self._call('decodes', x, **kwargs)
     75     def __repr__(self): return f'{self.name}:\nencodes: {self.encodes}decodes: {self.decodes}'

/usr/local/lib/python3.7/dist-packages/fastcore/transform.py in _call(self, fn, x, split_idx, **kwargs)
     81     def _call(self, fn, x, split_idx=None, **kwargs):
     82         if split_idx!=self.split_idx and self.split_idx is not None: return x
---> 83         return self._do_call(getattr(self, fn), x, **kwargs)
     84 
     85     def _do_call(self, f, x, **kwargs):

/usr/local/lib/python3.7/dist-packages/fastcore/transform.py in _do_call(self, f, x, **kwargs)
     88             ret = f.returns(x) if hasattr(f,'returns') else None
     89             return retain_type(f(x, **kwargs), x, ret)
---> 90         res = tuple(self._do_call(f, x_, **kwargs) for x_ in x)
     91         return retain_type(res, x)
     92 

/usr/local/lib/python3.7/dist-packages/fastcore/transform.py in <genexpr>(.0)
     88             ret = f.returns(x) if hasattr(f,'returns') else None
     89             return retain_type(f(x, **kwargs), x, ret)
---> 90         res = tuple(self._do_call(f, x_, **kwargs) for x_ in x)
     91         return retain_type(res, x)
     92 

/usr/local/lib/python3.7/dist-packages/fastcore/transform.py in _do_call(self, f, x, **kwargs)
     87             if f is None: return x
     88             ret = f.returns(x) if hasattr(f,'returns') else None
---> 89             return retain_type(f(x, **kwargs), x, ret)
     90         res = tuple(self._do_call(f, x_, **kwargs) for x_ in x)
     91         return retain_type(res, x)

/usr/local/lib/python3.7/dist-packages/fastcore/dispatch.py in __call__(self, *args, **kwargs)
    121         elif self.inst is not None: f = MethodType(f, self.inst)
    122         elif self.owner is not None: f = MethodType(f, self.owner)
--> 123         return f(*args, **kwargs)
    124 
    125     def __get__(self, inst, owner):

/usr/local/lib/python3.7/dist-packages/fastai/vision/core.py in encodes(self, x)
    251     def decodes(self, x:(PILBase,TensorImageBase)): return self._grab_sz(x)
    252 
--> 253     def encodes(self, x:TensorPoint): return _scale_pnts(x, self._get_sz(x), self.do_scale, self.y_first)
    254     def decodes(self, x:TensorPoint): return _unscale_pnts(x.view(-1, 2), self._get_sz(x))
    255 

/usr/local/lib/python3.7/dist-packages/fastai/vision/core.py in _scale_pnts(y, sz, do_scale, y_first)
    228 def _scale_pnts(y, sz, do_scale=True, y_first=False):
    229     if y_first: y = y.flip(1)
--> 230     res = y * 2/tensor(sz).float() - 1 if do_scale else y
    231     return TensorPoint(res, img_size=sz)
    232 

/usr/local/lib/python3.7/dist-packages/fastai/torch_core.py in __torch_function__(self, func, types, args, kwargs)
    339         convert=False
    340         if _torch_handled(args, self._opt, func): convert,types = type(self),(torch.Tensor,)
--> 341         res = super().__torch_function__(func, types, args=args, kwargs=kwargs)
    342         if convert: res = convert(res)
    343         if isinstance(res, TensorBase): res.set_meta(self, as_copy=True)

/usr/local/lib/python3.7/dist-packages/torch/_tensor.py in __torch_function__(cls, func, types, args, kwargs)
   1140 
   1141         with _C.DisableTorchFunction():
-> 1142             ret = func(*args, **kwargs)
   1143             if func in get_default_nowrap_functions():
   1144                 return ret

RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!

Hey, instead of quotes you should use triple backticks (```) for code blocks so it is more readable:

def whatever(a):
    pass

Sorry, I'm not sure why you get the error, but maybe other people will find it easier to help with better formatting.

I wonder if you still get a similar error if you try to do everything on the CPU.

dblock.summary(path, bs=12, device='cpu')

and in get_coords just return tensor(cardCoords) without the .cuda() call.
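
If I'm reading the traceback right, the crash comes from PointScaler: _scale_pnts multiplies your label by tensor(sz), which is created on the CPU, while get_coords has already moved the points to cuda:0. The dataloader moves the whole batch to the device itself later (the to_device call in _one_pass), so the labels can be left on the CPU. A minimal sketch of the change, keeping the rest of your function as it is:

def get_coords(fname):
  # get num of card from file name
  cardNum = int(re.findall(r'card \((.+)\).jpg$', fname.name)[0])
  # get array containing coords for the specific card
  cardCoords = coords[cardNum]
  # convert coords from strings to integers
  cardCoords = list(map(int, cardCoords))
  cardCoords = [[cardCoords[1], cardCoords[2]], [cardCoords[3], cardCoords[4]]]
  # return a plain CPU tensor; fastai will move the batch to the GPU for you
  return tensor(cardCoords)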

Thank you for your help! Changing just the get_coords function fixed the error.