Fast AI v2 Text Tokenization with tok_tfm

I’m having a difficult time understanding how to use a custom text tokenizer using the tok_tfm parameter:

My goal is simply to replicate the functionality from fastai version 1.

Fast AI V1:

all_letters = list(string.printable + string.whitespace)

customtokenizer = Tokenizer(pre_rules= [], post_rules=[])

processors = [TokenizeProcessor(tokenizer=customtokenizer, mark_fields=False),
NumericalizeProcessor(vocab=Vocab.create(all_letters, max_vocab=1000, min_freq=0))]

data = (TextList.from_csv(path, "songs_8.csv", cols='text', processor=processors)
.split_by_rand_pct(0.2)
.label_for_lm()
.databunch(bs=96))
data.save('data_block_lm4.pkl')

Fast AI V2:

dls = TextDataLoaders.from_csv(path=path, bs=8, header="infer", csv_fname='songs_8.csv', text_col='text', label_col='label', is_lm=True, tok_tfm=noop)

---------------------------------------------------------------------------

AttributeError Traceback (most recent call last)
in
----> 1 dls.show_batch()

~\anaconda3\lib\site-packages\fastai\data\core.py in show_batch(self, b, max_n, ctxs, show, unique, **kwargs)
100 if b is None: b = self.one_batch()
101 if not show: return self._pre_show_batch(b, max_n=max_n)
–> 102 show_batch(*self._pre_show_batch(b, max_n=max_n), ctxs=ctxs, max_n=max_n, **kwargs)
103 if unique: self.get_idxs = old_get_idxs
104

~\anaconda3\lib\site-packages\fastcore\dispatch.py in call(self, *args, **kwargs)
108 if not f: return args[0]
109 if self.inst is not None: f = MethodType(f, self.inst)
–> 110 return f(*args, **kwargs)
111
112 def get(self, inst, owner):

~\anaconda3\lib\site-packages\fastai\text\data.py in show_batch(x, y, samples, ctxs, max_n, trunc_at, **kwargs)
118 @typedispatch
119 def show_batch(x: LMTensorText, y, samples, ctxs=None, max_n=10, trunc_at=150, **kwargs):
–> 120 samples = L((s[0].truncate(trunc_at), s[1].truncate(trunc_at)) for s in samples)
121 return show_batch[TensorText](x, None, samples, ctxs=ctxs, max_n=max_n, trunc_at=None, **kwargs)
122

~\anaconda3\lib\site-packages\fastcore\foundation.py in call(cls, x, args, **kwargs)
49 return x
50
—> 51 res = super().call(
((x,) + args), **kwargs)
52 res._newchk = 0
53 return res

~\anaconda3\lib\site-packages\fastcore\foundation.py in init(self, items, use_list, match, *rest)
331 if items is None: items = []
332 if (use_list is not None) or not _is_array(items):
–> 333 items = list(items) if use_list else _listify(items)
334 if match is not None:
335 if is_coll(match): match = len(match)

~\anaconda3\lib\site-packages\fastcore\foundation.py in _listify(o)
244 if isinstance(o, list): return o
245 if isinstance(o, str) or _is_array(o): return [o]
–> 246 if is_iter(o): return list(o)
247 return [o]
248

~\anaconda3\lib\site-packages\fastai\text\data.py in (.0)
118 @typedispatch
119 def show_batch(x: LMTensorText, y, samples, ctxs=None, max_n=10, trunc_at=150, **kwargs):
–> 120 samples = L((s[0].truncate(trunc_at), s[1].truncate(trunc_at)) for s in samples)
121 return show_batch[TensorText](x, None, samples, ctxs=ctxs, max_n=max_n, trunc_at=None, **kwargs)
122

AttributeError: 'L' object has no attribute 'truncate'

I’m confused, why don’t you pass your custom tokenizer to tok_tfm? Or am I missing something?

You just need to initialize the tokenizer and then pass it to tok_tfm so that when it's called it will do the tokenization.

Something like:

my_tok = MyTokenizer(**kwargs)

dls = TextDataLoaders.from_csv(path=path, bs=8, header="infer", csv_fname='songs_8.csv', text_col='text', label_col='label', is_lm=True, tok_tfm=my_tok)

When my_tok is called it should be able to accept items to be tokenized, like you can see with the default SpacyTokenizer:

class SpacyTokenizer():
    "Spacy tokenizer for `lang`"
    def __init__(self, lang='en', special_toks=None, buf_sz=5000):
        # Fall back to fastai's default special tokens when none are supplied.
        self.special_toks = ifnone(special_toks, defaults.text_spec_tok)
        # Blank pipeline: only tokenization is needed, so drop the heavy components.
        nlp = spacy.blank(lang, disable=["parser", "tagger", "ner"])
        # Register each special token so spaCy emits it as a single token.
        for tok in self.special_toks:
            nlp.tokenizer.add_special_case(tok, [{ORTH: tok}])
        self.pipe = nlp.pipe
        self.buf_sz = buf_sz

    def __call__(self, items):
        # Lazily stream items through spaCy in batches of `buf_sz`,
        # yielding the token texts of each document as an `L`.
        docs = self.pipe(map(str, items), batch_size=self.buf_sz)
        for doc in docs:
            yield L(doc).attrgot('text')

tok = SpacyTokenizer()

dls = TextDataLoaders.from_csv(path=path, bs=8, header="infer", csv_fname='songs_8.csv', text_col='text', label_col='label', is_lm=True, tok_tfm=tok)

Results in the following error:


TypeError Traceback (most recent call last)
in
----> 1 dls = TextDataLoaders.from_csv(path=path, bs=8, header="infer", csv_fname='songs_8.csv', text_col='text', label_col='label', is_lm=True, tok_tfm=tok)

~\anaconda3\lib\site-packages\fastai\text\data.py in from_csv(cls, path, csv_fname, header, delimiter, **kwargs)
251 "Create from csv file in path/csv_fname"
252 df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
–> 253 return cls.from_df(df, path=path, **kwargs)
254
255 TextDataLoaders.from_csv = delegates(to=TextDataLoaders.from_df)(TextDataLoaders.from_csv)

~\anaconda3\lib\site-packages\fastai\text\data.py in from_df(cls, df, path, valid_pct, seed, text_col, label_col, label_delim, y_block, text_vocab, is_lm, valid_col, tok_tfm, seq_len, backwards, **kwargs)
245 get_y=None if is_lm else ColReader(label_col, label_delim=label_delim),
246 splitter=splitter)
–> 247 return cls.from_dblock(dblock, df, path=path, seq_len=seq_len, **kwargs)
248
249 @classmethod

~\anaconda3\lib\site-packages\fastai\data\core.py in from_dblock(cls, dblock, source, path, bs, val_bs, shuffle_train, device, **kwargs)
178 @classmethod
179 def from_dblock(cls, dblock, source, path=’.’, bs=64, val_bs=None, shuffle_train=True, device=None, **kwargs):
–> 180 return dblock.dataloaders(source, path=path, bs=bs, val_bs=val_bs, shuffle_train=shuffle_train, device=device, **kwargs)
181
182 _docs=dict(getitem=“Retrieve DataLoader at i (0 is training, 1 is validation)”,

~\anaconda3\lib\site-packages\fastai\data\block.py in dataloaders(self, source, path, verbose, **kwargs)
111
112 def dataloaders(self, source, path=’.’, verbose=False, **kwargs):
–> 113 dsets = self.datasets(source)
114 kwargs = {**self.dls_kwargs, **kwargs, ‘verbose’: verbose}
115 return dsets.dataloaders(path=path, after_item=self.item_tfms, after_batch=self.batch_tfms, **kwargs)

~\anaconda3\lib\site-packages\fastai\data\block.py in datasets(self, source, verbose)
108 splits = (self.splitter or RandomSplitter())(items)
109 pv(f"{len(splits)} datasets of sizes {’,’.join([str(len(s)) for s in splits])}", verbose)
–> 110 return Datasets(items, tfms=self._combine_type_tfms(), splits=splits, dl_type=self.dl_type, n_inp=self.n_inp, verbose=verbose)
111
112 def dataloaders(self, source, path=’.’, verbose=False, **kwargs):

~\anaconda3\lib\site-packages\fastai\data\core.py in init(self, items, tfms, tls, n_inp, dl_type, **kwargs)
308 def init(self, items=None, tfms=None, tls=None, n_inp=None, dl_type=None, **kwargs):
309 super().init(dl_type=dl_type)
–> 310 self.tls = L(tls if tls else [TfmdLists(items, t, **kwargs) for t in L(ifnone(tfms,[None]))])
311 self.n_inp = ifnone(n_inp, max(1, len(self.tls)-1))
312

~\anaconda3\lib\site-packages\fastai\data\core.py in (.0)
308 def init(self, items=None, tfms=None, tls=None, n_inp=None, dl_type=None, **kwargs):
309 super().init(dl_type=dl_type)
–> 310 self.tls = L(tls if tls else [TfmdLists(items, t, **kwargs) for t in L(ifnone(tfms,[None]))])
311 self.n_inp = ifnone(n_inp, max(1, len(self.tls)-1))
312

~\anaconda3\lib\site-packages\fastcore\foundation.py in call(cls, x, args, **kwargs)
49 return x
50
—> 51 res = super().call(
((x,) + args), **kwargs)
52 res._newchk = 0
53 return res

~\anaconda3\lib\site-packages\fastai\data\core.py in init(self, items, tfms, use_list, do_setup, split_idx, train_setup, splits, types, verbose, dl_type)
234 if do_setup:
235 pv(f"Setting up {self.tfms}", verbose)
–> 236 self.setup(train_setup=train_setup)
237
238 def _new(self, items, split_idx=None, **kwargs):

~\anaconda3\lib\site-packages\fastai\data\core.py in setup(self, train_setup)
250
251 def setup(self, train_setup=True):
–> 252 self.tfms.setup(self, train_setup)
253 if len(self) != 0:
254 x = super().getitem(0) if self.splits is None else super().getitem(self.splits[0])[0]

~\anaconda3\lib\site-packages\fastcore\transform.py in setup(self, items, train_setup)
189 tfms = self.fs[:]
190 self.fs.clear()
–> 191 for t in tfms: self.add(t,items, train_setup)
192
193 def add(self,t, items=None, train_setup=False):

~\anaconda3\lib\site-packages\fastcore\transform.py in add(self, t, items, train_setup)
192
193 def add(self,t, items=None, train_setup=False):
–> 194 t.setup(items, train_setup)
195 self.fs.append(t)
196

~\anaconda3\lib\site-packages\fastcore\transform.py in setup(self, items, train_setup)
76 def setup(self, items=None, train_setup=False):
77 train_setup = train_setup if self.train_setup is None else self.train_setup
—> 78 return self.setups(getattr(items, ‘train’, items) if train_setup else items)
79
80 def _call(self, fn, x, split_idx=None, **kwargs):

~\anaconda3\lib\site-packages\fastcore\dispatch.py in call(self, *args, **kwargs)
108 if not f: return args[0]
109 if self.inst is not None: f = MethodType(f, self.inst)
–> 110 return f(*args, **kwargs)
111
112 def get(self, inst, owner):

~\anaconda3\lib\site-packages\fastai\text\data.py in setups(self, dsets)
40 if dsets is None: return
41 if self.vocab is None:
—> 42 count = dsets.counter if getattr(dsets, ‘counter’, None) is not None else Counter(p for o in dsets for p in o)
43 if self.special_toks is None and hasattr(dsets, ‘special_toks’):
44 self.special_toks = dsets.special_toks

~\anaconda3\lib\collections_init_.py in init(*args, **kwds)
566 raise TypeError(‘expected at most 1 arguments, got %d’ % len(args))
567 super(Counter, self).init()
–> 568 self.update(*args, **kwds)
569
570 def missing(self, key):

~\anaconda3\lib\collections_init_.py in update(*args, **kwds)
653 super(Counter, self).update(iterable) # fast path when counter is empty
654 else:
–> 655 _count_elements(self, iterable)
656 if kwds:
657 self.update(kwds)

TypeError: unhashable type: 'L'