Lesson 2: Planets - Parse Error

I’ve been able to run each notebook up to this point without any issues.

When I attempt to run the Lesson 2 notebook on multi-label classification of the planet satellite images, I get the following error when I run this code:

data = get_data(256)
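
For context, the setup earlier in the notebook looks roughly like this (essentially the lesson defaults; get_data itself is the small helper whose body shows up in the traceback below, and my paths may differ slightly):

from fastai.conv_learner import *

PATH = 'data/planet/'
label_csv = f'{PATH}train_v2.csv'      # Kaggle planet labels file
n = len(list(open(label_csv))) - 1     # number of labelled images
val_idxs = get_cv_idxs(n)              # random validation indices
f_model = resnet34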

Error:


ParserError Traceback (most recent call last)
<ipython-input-...> in <module>()
----> 1 data = get_data(256)

<ipython-input-...> in get_data(sz)
2 tfms = tfms_from_model(f_model, sz, aug_tfms=transforms_top_down, max_zoom=1.05)
3 return ImageClassifierData.from_csv(PATH, 'train', label_csv, tfms=tfms,
----> 4 suffix='.jpg', val_idxs=val_idxs, test_name='test')

~/ssd/fastai/courses/dl1/fastai/dataset.py in from_csv(cls, path, folder, csv_fname, bs, tfms, val_idxs, suffix, test_name, continuous, skip_header, num_workers, cat_separator)
521 assert not (tfms[0] is None or tfms[1] is None), "please provide transformations for your train and validation sets"
522 assert not (os.path.isabs(folder)), "folder needs to be a relative path"
--> 523 fnames,y,classes = csv_source(folder, csv_fname, skip_header, suffix, continuous=continuous, cat_separator=cat_separator)
524 return cls.from_names_and_array(path, fnames, y, classes, val_idxs, test_name,
525 num_workers=num_workers, suffix=suffix, tfms=tfms, bs=bs, continuous=continuous)

~/ssd/fastai/courses/dl1/fastai/dataset.py in csv_source(folder, csv_file, skip_header, suffix, continuous, cat_separator)
162
163 def csv_source(folder, csv_file, skip_header=True, suffix='', continuous=False, cat_separator=' '):
--> 164 fnames,csv_labels = parse_csv_labels(csv_file, skip_header, cat_separator)
165 return dict_source(folder, fnames, csv_labels, suffix, continuous)
166

~/ssd/fastai/courses/dl1/fastai/dataset.py in parse_csv_labels(fn, skip_header, cat_separator)
150 :param cat_separator: the separator for the categories column
151 “”"
–> 152 df = pd.read_csv(fn, index_col=0, header=0 if skip_header else None, dtype=str)
153 fnames = df.index.values
154 df.iloc[:,0] = df.iloc[:,0].str.split(cat_separator)

~/anaconda3/envs/fastai/lib/python3.6/site-packages/pandas/io/parsers.py in parser_f(filepath_or_buffer, sep, delimiter, header, names, index_col, usecols, squeeze, prefix, mangle_dupe_cols, dtype, engine, converters, true_values, false_values, skipinitialspace, skiprows, nrows, na_values, keep_default_na, na_filter, verbose, skip_blank_lines, parse_dates, infer_datetime_format, keep_date_col, date_parser, dayfirst, iterator, chunksize, compression, thousands, decimal, lineterminator, quotechar, quoting, escapechar, comment, encoding, dialect, tupleize_cols, error_bad_lines, warn_bad_lines, skipfooter, doublequote, delim_whitespace, low_memory, memory_map, float_precision)
676 skip_blank_lines=skip_blank_lines)
677
--> 678 return _read(filepath_or_buffer, kwds)
679
680 parser_f.__name__ = name

~/anaconda3/envs/fastai/lib/python3.6/site-packages/pandas/io/parsers.py in _read(filepath_or_buffer, kwds)
444
445 try:
--> 446 data = parser.read(nrows)
447 finally:
448 parser.close()

~/anaconda3/envs/fastai/lib/python3.6/site-packages/pandas/io/parsers.py in read(self, nrows)
1034 raise ValueError('skipfooter not supported for iteration')
1035
-> 1036 ret = self._engine.read(nrows)
1037
1038 # May alter columns / col_dict

~/anaconda3/envs/fastai/lib/python3.6/site-packages/pandas/io/parsers.py in read(self, nrows)
1846 def read(self, nrows=None):
1847 try:
-> 1848 data = self._reader.read(nrows)
1849 except StopIteration:
1850 if self._first_chunk:

pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader.read()

pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader._read_low_memory()

pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader._read_rows()

pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader._tokenize_rows()

pandas/_libs/parsers.pyx in pandas._libs.parsers.raise_parser_error()

ParserError: Error tokenizing data. C error: Expected 1 fields in line 6, saw 2
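
Here’s what I’ve tried so far to narrow it down, outside the fastai code path (a minimal sketch; label_csv is the path from the setup above):

import pandas as pd

# Look at the raw text around line 6, which the error points at
with open(label_csv) as f:
    for i, line in enumerate(f, 1):
        if i <= 8:
            print(i, repr(line))
        else:
            break

# Repeat the exact read that fastai's parse_csv_labels performs internally
df = pd.read_csv(label_csv, index_col=0, header=0, dtype=str)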

Any ideas on what could be going wrong here? I’m at a loss…

Disregard. It was a corrupted file download; re-downloading the CSV fixed it.
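
In case anyone else hits this: a quick sanity check on the freshly downloaded CSV would have caught it (a minimal sketch; the path is the lesson default, adjust to your setup):

import pandas as pd

label_csv = 'data/planet/train_v2.csv'   # lesson default location

# The labels file should parse cleanly and have two columns:
# image_name and a space-separated tags string for each image
df = pd.read_csv(label_csv)
print(df.shape)
print(df.columns.tolist())
print(df.head())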