A2 Voila PosixPath attribute 'tell' error

I keep getting this attribute error with Voila running locally. I'm running the notebook in Voila with a requirements.txt file, an image file, and an export.pkl file. I'm using Python 3.7.8, with the packages installed in a Conda environment.

AttributeError                            Traceback (most recent call last)
<ipython-input> in <module>
     18 # Code that is run and does something
     19 defaults.device = torch.device('cpu')
---> 20 learn = load_learner(".")
     21
     22 # Notes

~/anaconda3/envs/fastai37/lib/python3.7/site-packages/fastai/basic_train.py in load_learner(path, file, test, tfm_y, **db_kwargs)
    619     "Load a Learner object saved with export_state in path/file with empty data, optionally add test and load on cpu. file can be file-like (file or buffer)"
    620     source = Path(path)/file if is_pathlike(file) else file
--> 621     state = torch.load(source, map_location='cpu') if defaults.device == torch.device('cpu') else torch.load(source)
    622     model = state.pop('model')
    623     src = LabelLists.load_state(path, state.pop('data'))

~/anaconda3/envs/fastai37/lib/python3.7/site-packages/torch/serialization.py in load(f, map_location, pickle_module, **pickle_load_args)
    525     with _open_file_like(f, 'rb') as opened_file:
    526         if _is_zipfile(opened_file):
--> 527             with _open_zipfile_reader(f) as opened_zipfile:
    528                 return _load(opened_zipfile, map_location, pickle_module, **pickle_load_args)
    529     return _legacy_load(opened_file, map_location, pickle_module, **pickle_load_args)

~/anaconda3/envs/fastai37/lib/python3.7/site-packages/torch/serialization.py in __init__(self, name_or_buffer)
    222 class _open_zipfile_reader(_opener):
    223     def __init__(self, name_or_buffer):
--> 224         super(_open_zipfile_reader, self).__init__(torch._C.PyTorchFileReader(name_or_buffer))
    225
    226

AttributeError: 'PosixPath' object has no attribute 'tell'

Here’s my ipynb code:


%matplotlib inline

from IPython.display import clear_output, display, Image

from matplotlib.pyplot import imshow, axis, show
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

from ipywidgets import interact, interactive, fixed, interact_manual, Layout
import ipywidgets as widgets
import re

from fastai.vision import *
import urllib.request

import warnings

# Code that is run and does something
defaults.device = torch.device('cpu')
learn = load_learner(".")

# Notes
# This webapp may be vulnerable to large files that are downloaded from the URLs
# or from direct file uploads. I'm not sure how to handle this.

# Widget for receiving an image link
link1 = widgets.Text(
    description = "Image link: ",
    placeholder = "Type image link here"
)

# Widget that downloads the image when clicked
download1 = widgets.Button(
    description = 'Download!',
    disabled = False,
    button_style = '', # 'success', 'info', 'warning', 'danger' or ''
    tooltip = 'Download the image supplied by the link',
)

def dl_img(event):
  with d1_out:
    d1_out.clear_output()
    # Save the image to 'image.png'
    bla = link1
    # Check that input is a string
    if(type(bla.value) is not str):
      print("Error, link must be a string!")
    # Check that input link leads to an image
    elif(re.search(r'\.(png|jpg)', bla.value) is None):
      print("URL must link to a png or jpg file!")
    else:
      try: # Check that there is an image at the link
        urllib.request.urlretrieve(bla.value, 'image.png')
        print('Image downloaded!')
        # Show the downloaded image (fall back to the default image)
        try:
          with open("./image.png", "rb") as file:
            image1 = file.read()
        except OSError:
          with open("./default.png", "rb") as file:
            image1 = file.read()
        # widgets.Image expects raw image bytes with format='png', and the box
        # has to be displayed explicitly (assigning it to a local variable was
        # why it never showed up)
        display(widgets.HBox([widgets.Image(value = image1, format = 'png')]))
      except: # If the image in the link is not found
        print("image not found :(")
download1.on_click(dl_img)

# Widget that classifies the image when clicked
classify1 = widgets.Button(
    description = 'Classify!',
    disabled = False,
    button_style = '', # 'success', 'info', 'warning', 'danger' or ''
    tooltip = 'Classify the image as "draft" or "not draft"',
)

def classify_img(event):
  with c1_out:
    c1_out.clear_output()
    try:
      img = open_image(Path('./image.png'))
    except:
      img = open_image(Path('./default.png'))
      print("Image not found, default loaded instead")
    pred_class,pred_idx,outputs = learn.predict(img)
    print('Class: ' + pred_class.obj)

# Suppress the upsampling warnings that pytorch emits during prediction.
# Note: a `with warnings.catch_warnings():` block would not help here, because
# the filter it installs is reverted as soon as the block exits, while the
# button callback only runs later when the button is clicked.
warnings.filterwarnings('ignore')
classify1.on_click(classify_img)

# Widget that shows the downloaded image
img1 = widgets.Button(
    description = 'Show image',
    disabled = False,
    button_style = '', # 'success', 'info', 'warning', 'danger' or ''
    tooltip = 'View the image',
)

def view_img(event):
  with img_out:
    img_out.clear_output(True)
    try:
      img = mpimg.imread('image.png')
    except:
      print("Image not found, default loaded instead")
      img = mpimg.imread('default.png')
    imgplot = plt.imshow(img)
    plt.axis('off')
    plt.show()
img1.on_click(view_img)
    

## Output widgets
# Show the link that's being received
def f1(link):
  print(' Link received: {}'.format(link))
out = widgets.interactive_output(f1, {'link': link1})

# Print statement that confirms the download
d1_out = widgets.Output(layout = Layout(margin='auto'))

# Print statement that shows the classification
c1_out = widgets.Output(layout = Layout(margin='auto'))

# Print image
img_out = widgets.Output(layout = Layout(margin = 'auto'))

# Layout of all the widgets
widgets.VBox([
              widgets.HBox([widgets.VBox([link1, download1, classify1]),
              widgets.VBox([out, d1_out, c1_out])]),
              widgets.HBox([img1, img_out])
])

Any tips?

I tried this as well:


import urllib.request

from fastai.vision import *

defaults.device = torch.device('cpu')

learner = load_learner('.', "export.pkl")

It still gave me the same error in Voila.

Here’s the requirements.txt file if it helps:


appnope==0.1.0
argon2-cffi @ file:///Users/runner/miniforge3/conda-bld/argon2-cffi_1596629848887/work
async-generator==1.10
attrs==19.3.0
backcall @ file:///home/conda/feedstock_root/build_artifacts/backcall_1592338393461/work
backports.functools-lru-cache==1.6.1
beautifulsoup4 @ file:///Users/runner/miniforge3/conda-bld/beautifulsoup4_1589760921411/work
bleach @ file:///home/conda/feedstock_root/build_artifacts/bleach_1588608214987/work
blis==0.4.1
Bottleneck==1.3.2
brotlipy==0.7.0
catalogue==1.0.0
certifi==2020.6.20
cffi @ file:///Users/runner/miniforge3/conda-bld/cffi_1595805544857/work
chardet==3.0.4
cryptography @ file:///Users/runner/miniforge3/conda-bld/cryptography_1595348799213/work
cycler==0.10.0
cymem @ file:///Users/runner/miniforge3/conda-bld/cymem_1591801134721/work
decorator==4.4.2
defusedxml==0.6.0
entrypoints==0.3
fastai==1.0.61
fastprogress @ file:///home/jhoward/anaconda3/conda-bld/fastprogress_1597084392729/work
idna @ file:///home/conda/feedstock_root/build_artifacts/idna_1593328102638/work
importlib-metadata @ file:///Users/runner/miniforge3/conda-bld/importlib-metadata_1593211384223/work
ipykernel @ file:///Users/runner/miniforge3/conda-bld/ipykernel_1595446887249/work/dist/ipykernel-5.3.4-py3-none-any.whl
ipython @ file:///Users/runner/miniforge3/conda-bld/ipython_1596256315167/work
ipython-genutils==0.2.0
ipywidgets==7.5.1
jedi @ file:///Users/runner/miniforge3/conda-bld/jedi_1595018286626/work
Jinja2==2.11.2
jsonschema==3.2.0
jupyter-client @ file:///home/conda/feedstock_root/build_artifacts/jupyter_client_1594732094290/work
jupyter-console==6.1.0
jupyter-core==4.6.3
jupyter-server==0.1.1
jupyterlab-pygments==0.1.1
kiwisolver==1.2.0
MarkupSafe==1.1.1
matplotlib @ file:///Users/runner/miniforge3/conda-bld/matplotlib-base_1595353072824/work
mistune==0.8.4
murmurhash==1.0.0
nbconvert==5.6.1
nbformat @ file:///home/conda/feedstock_root/build_artifacts/nbformat_1594060262917/work
notebook @ file:///Users/runner/miniforge3/conda-bld/notebook_1596502482596/work
numexpr==2.7.1
numpy @ file:///Users/runner/miniforge3/conda-bld/numpy_1595522909688/work
olefile==0.46
packaging @ file:///home/conda/feedstock_root/build_artifacts/packaging_1589925210001/work
pandas @ file:///Users/runner/miniforge3/conda-bld/pandas_1595958476229/work
pandocfilters==1.4.2
parso @ file:///home/conda/feedstock_root/build_artifacts/parso_1595548966091/work
pexpect==4.8.0
pickleshare==0.7.5
Pillow @ file:///Users/runner/miniforge3/conda-bld/pillow_1594212133169/work
plac==0.9.6
preshed @ file:///Users/runner/miniforge3/conda-bld/preshed_1591802150273/work
prometheus-client @ file:///home/conda/feedstock_root/build_artifacts/prometheus_client_1590412252446/work
prompt-toolkit @ file:///home/conda/feedstock_root/build_artifacts/prompt-toolkit_1597061474901/work
ptyprocess==0.6.0
pycparser @ file:///home/conda/feedstock_root/build_artifacts/pycparser_1593275161868/work
Pygments==2.6.1
pyOpenSSL==19.1.0
pyparsing==2.4.7
PyQt5==5.12.3
PyQt5-sip==4.19.18
PyQtChart==5.12
PyQtWebEngine==5.12.1
pyrsistent==0.16.0
PySocks==1.7.1
python-dateutil==2.8.1
pytz==2020.1
PyYAML==5.3.1
pyzmq==19.0.2
qtconsole @ file:///home/conda/feedstock_root/build_artifacts/qtconsole_1592845750760/work
QtPy==1.9.0
requests @ file:///home/conda/feedstock_root/build_artifacts/requests_1592425495151/work
scipy @ file:///Users/runner/miniforge3/conda-bld/scipy_1595583590545/work
Send2Trash==1.5.0
six @ file:///home/conda/feedstock_root/build_artifacts/six_1590081179328/work
soupsieve @ file:///Users/runner/miniforge3/conda-bld/soupsieve_1589778513658/work
spacy @ file:///Users/runner/miniforge3/conda-bld/spacy_1594659165489/work
srsly @ file:///Users/runner/miniforge3/conda-bld/srsly_1589222440335/work
terminado==0.8.3
testpath==0.4.4
thinc @ file:///Users/runner/miniforge3/conda-bld/thinc_1590400417741/work
torch==1.4.0
torchvision==0.5.0
tornado==6.0.4
tqdm @ file:///home/conda/feedstock_root/build_artifacts/tqdm_1596476591553/work
traitlets==4.3.3
urllib3 @ file:///home/conda/feedstock_root/build_artifacts/urllib3_1595434816409/work
voila==0.1.21
wasabi @ file:///home/conda/feedstock_root/build_artifacts/wasabi_1594929739383/work
wcwidth @ file:///home/conda/feedstock_root/build_artifacts/wcwidth_1595859607677/work
webencodings==0.5.1
widgetsnbextension @ file:///Users/runner/miniforge3/conda-bld/widgetsnbextension_1594164361045/work
zipp==3.1.0

I've managed to fix the issue. It was caused by mismatched module versions. If you build your model on Google Colab and then test the web app locally with Voila and get errors, check that the Python and module versions match between Colab and your local environment. In my case the export.pkl had been created on Colab with a newer PyTorch, and the older local install (torch 1.4.0) could not read it, most likely because PyTorch changed its default serialization format; that mismatch is what surfaces as the PosixPath 'tell' error. At the time of writing, Google Colab is using Python 3.6.9, PyTorch 1.6.0, and Torchvision 0.7.0.

To check the Python version in a Google Colab notebook, you can run sys.version (after import sys). To check the version of a module, you can run help('packageName'); for example, help('jupyter') to check Jupyter's version.
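
For example, here is a quick version check that can be run in both the Colab notebook and the local environment (a minimal sketch; it assumes torch, torchvision, and fastai are importable in both):

import sys
import fastai, torch, torchvision

print(sys.version)               # Python version, e.g. 3.6.9 on Colab
print(torch.__version__)         # e.g. 1.6.0
print(torchvision.__version__)   # e.g. 0.7.0
print(fastai.__version__)        # e.g. 1.0.61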

I installed everything locally in Conda and found that it would stick with PyTorch 1.4.0 and Torchvision 0.5.0 unless I installed those two first, so install them before fastai. Here are the steps I used to create a conda environment that pulls in the newer pytorch and torchvision:
conda create -n fastai369 python=3.6.9
conda activate fastai369
conda install -c pytorch torchvision=0.7.0
conda install -c fastai fastai
conda install jupyter
conda install voila

To check the versions of the modules in your local conda environment, you can run
pip freeze
to list everything in the terminal, or
pip freeze > requirements.txt
to write the list to a text file.
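
After rebuilding the environment, a quick way to confirm the fix before pointing Voila at the notebook again is to load the exported model on its own (a minimal sketch, assuming export.pkl is in the current directory):

import torch
from fastai.vision import *

defaults.device = torch.device('cpu')
learn = load_learner('.', 'export.pkl')   # should no longer raise the PosixPath error
print('export.pkl loaded with torch', torch.__version__)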


Related thread on the PyTorch forum: https://discuss.pytorch.org/t/error-when-loading-model-trained-with-python3-7-pytorch-0-4-1-using-python2-7-pytorch-0-4-1/30931