To make a single prediction:
- make sure you have the network built.
def classifier_model_network(dir_path, cuda_id=0):
    """Build the RNN classifier network for inference.

    :param dir_path: directory of the saved model artifacts; expects the
                     vocabulary at ``<dir_path>/tmp/itos.pkl``
    :param cuda_id: CUDA device id; forced to -1 (CPU) when CUDA is unavailable
    :return: the classifier network, switched to evaluation mode
    """
    if not hasattr(torch._C, '_cuda_setDevice'):
        print('CUDA not available. Setting device=-1.')
        cuda_id = -1
    torch.cuda.set_device(cuda_id)
    dir_path = Path(dir_path)
    # Load vocabulary lookup; use a context manager so the file handle is
    # closed (the original left it open).
    with open(dir_path / 'tmp' / 'itos.pkl', 'rb') as f:
        itos = pickle.load(f)
    n_tokens = len(itos)
    # NOTE(review): dropmult, bptt, label_class, em_sz, nh and nl are
    # module-level globals not defined in this snippet — confirm they are
    # set before calling.
    dps = np.array([0.4, 0.5, 0.05, 0.3, 0.4]) * dropmult
    m = get_rnn_classifier(bptt, 20 * 70, label_class, n_tokens, emb_sz=em_sz, n_hid=nh, n_layers=nl,
                           pad_token=1,
                           layers=[em_sz * 3, 50, label_class], drops=[dps[4], 0.1],
                           dropouti=dps[0], wdrop=dps[1], dropoute=dps[2], dropouth=dps[3])
    # BUG FIX: was `m.eval` (a bare attribute access — a no-op). eval()
    # *disables* dropout/batchnorm updates, which is what inference needs;
    # the original comment had this backwards.
    m.eval()
    return m
- Fetch the model Learner.
def get_learner(dir_path, model_network, modelData, cuda_id=0):
    """Wrap a built network in an RNN_Learner and load the saved weights.

    :param dir_path: directory containing ``models/fwd_clas_1.h5``
    :param model_network: the classifier network (see classifier_model_network)
    :param modelData: a ModelData instance pointing at dir_path
    :param cuda_id: CUDA device id; forced to -1 (CPU) when CUDA is unavailable
    :return: the learner with weights loaded and the model in eval mode

    NOTE(review): a later two-argument ``get_learner`` in this file shadows
    this definition — consider renaming one of them.
    """
    if not hasattr(torch._C, '_cuda_setDevice'):
        print('CUDA not available. Setting device=-1.')
        cuda_id = -1
    torch.cuda.set_device(cuda_id)
    # On CPU, remap CUDA-saved tensors when loading the checkpoint.
    map_location = 'cpu' if cuda_id == -1 else None
    dir_path = Path(dir_path)
    learner = RNN_Learner(modelData, TextModel(to_gpu(model_network)))
    # BUG FIX: was `learner.model.eval` (a no-op). eval() disables dropout
    # for inference; the original comment had this backwards.
    learner.model.eval()
    # Load the checkpoint twice on purpose: once raw (torch.load) and once
    # through the learner, then compare parameter-by-parameter to confirm
    # the learner really picked up the saved weights.
    loaded_weights = torch.load(os.path.join(dir_path, "models/fwd_clas_1.h5"), map_location=map_location)
    learner.load("fwd_clas_1")
    for k, v in loaded_weights.items():
        print(k, np.all(v == learner.model.state_dict()[k]))
    return learner
def predict(learner: Learner, X):
    """Run the learner over X and return per-example class probabilities."""
    raw_scores = learner.predict_dl(create_dl(X))
    probabilities = []
    for scores in raw_scores:
        probabilities.append(softmax(scores))
    return probabilities
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
return np.exp(x) / np.sum(np.exp(x), axis=0)
def get_learner(dir_path, cuda_id):
    """Convenience wrapper: build the network and return a ready Learner.

    :param dir_path: data directory of the saved model
    :param cuda_id: CUDA device id (-1 for CPU)
    :return: an RNN_Learner with weights loaded, in eval mode

    BUG FIX: this definition shadows the earlier four-argument
    ``get_learner``, so the original body's four-argument call resolved to
    *this* function and raised TypeError. The learner construction is
    inlined here so the wrapper works; consider renaming one of the two
    functions instead.
    """
    network = classifier_model_network(dir_path, cuda_id)
    learn = RNN_Learner(ModelData(dir_path, None, None), TextModel(to_gpu(network)))
    learn.model.eval()  # disable dropout for inference
    learn.load("fwd_clas_1")
    return learn
- Now make the prediction with the `predict` method.
# fetch the ids of the text
def get_tokens(sentence, lang='en'):
    """Fetch word tokens for a sentence.

    Prepends the language-model markup prefix (BOS / FLD markers) before
    tokenizing.

    :param sentence: raw text to tokenize
    :param lang: language code passed to the Tokenizer
    :return: the token list produced by Tokenizer.proc_text
    """
    marked_up = f'\n{BOS} {FLD} 1 {sentence}'
    tokenizer = Tokenizer(lang=lang)
    return tokenizer.proc_text(marked_up)
def convert2ids(tokens: list, tok2id_model_path):
    """Map word tokens to integer ids using a pickled itos vocabulary.

    Tokens absent from the vocabulary map to id 0 (the defaultdict default).

    :param tokens: list of token strings
    :param tok2id_model_path: path to the pickled index-to-string list (itos.pkl)
    :return: numpy int array of token ids
    """
    # Use a context manager so the vocab file handle is closed
    # (the original leaked it).
    with open(tok2id_model_path, 'rb') as f:
        itos = pickle.load(f)
    stoi = collections.defaultdict(lambda: 0, {tok: idx for idx, tok in enumerate(itos)})
    return np.array([stoi[tok] for tok in tokens])
# --- Example driver -------------------------------------------------------
dir_path = ' '       # data directory path of the model built and saved
lm_model_path = ' '  # file path of the itos.pkl (tok2id) file in the data dir
# BUG FIX: cuda_id was never defined before use (NameError at runtime);
# default to -1 (CPU) to match the CUDA-unavailable fallback above.
cuda_id = -1
toks = get_tokens('some text sentence you want to predict')
ids = convert2ids(toks, lm_model_path)
learner = get_learner(dir_path, cuda_id)
predict(learner, ids)