Below is the test (inference) code for my trained model, but I don't know how to feed it a custom audio file for recognition:
# Test/inference script: runs the trained acoustic model (CNN + CTC) on test
# utterances, converts the predicted pinyin sequence to Chinese characters with
# the Transformer language model, and reports the character error rate.
import os
import difflib
import tensorflow as tf
import numpy as np
from utils import decode_ctc, GetEditDistance

# 0. Prepare the decoding vocabularies. The parameters must match those used
#    during training; the vocabularies could also be saved to disk at training
#    time and read back here directly.
from utils import get_data, data_hparams
data_args = data_hparams()
train_data = get_data(data_args)

# 1. Acoustic model -----------------------------------------------------------
from model_speech.cnn_ctc import Am, am_hparams
am_args = am_hparams()
am_args.vocab_size = len(train_data.am_vocab)
am = Am(am_args)
print('loading acoustic model...')
am.ctc_model.load_weights('logs_am/model.h5')

# 2. Language model -----------------------------------------------------------
from model_language.transformer import Lm, lm_hparams
lm_args = lm_hparams()
lm_args.input_vocab_size = len(train_data.pny_vocab)
lm_args.label_vocab_size = len(train_data.han_vocab)
lm_args.dropout_rate = 0.  # no dropout at inference time
print('loading language model...')
lm = Lm(lm_args)
sess = tf.Session(graph=lm.graph)
with lm.graph.as_default():
    saver = tf.train.Saver()
with sess.as_default():
    latest = tf.train.latest_checkpoint('logs_lm')
    saver.restore(sess, latest)

# 3. Prepare the test data. It need not be the same as the training data; pick
#    the split via data_args.data_type. It should be set to 'test' here; I use
#    'train' only because my demo model is small, so with 'test' you can't see
#    any effect and some words would be out of vocabulary.
data_args.data_type = 'test'
data_args.shuffle = False
data_args.batch_size = 1
test_data = get_data(data_args)

# 4. Test ---------------------------------------------------------------------
am_batch = test_data.get_am_batch()
word_num = 0
word_error_num = 0
for i in range(10):
    print('\n the ', i, 'th example.')
    # Run the trained acoustic model on one utterance.
    inputs, _ = next(am_batch)
    x = inputs['the_inputs']
    y = test_data.pny_lst[i]
    result = am.model.predict(x, steps=1)
    # Convert the numeric CTC output into pinyin text.
    _, text = decode_ctc(result, train_data.am_vocab)
    text = ' '.join(text)
    print('text result:', text)
    print('original result:', ' '.join(y))
    with sess.as_default():
        text = text.strip('\n').split(' ')
        x = np.array([train_data.pny_vocab.index(pny) for pny in text])
        # NOTE(review): the pasted code had reshape(1, 1), which can only hold
        # a single token; the language model expects the whole sequence as one
        # batch row, so reshape to (1, -1).
        x = x.reshape(1, -1)
        preds = sess.run(lm.preds, {lm.x: x})
        label = test_data.han_lst[i]
        got = ''.join(train_data.han_vocab[idx] for idx in preds[0])
        print('original characters:', label)
        print('recognition result:', got)
        # Accumulate edit-distance errors, capped at the label length so the
        # per-utterance error rate never exceeds 100%.
        word_error_num += min(len(label), GetEditDistance(label, got))
        word_num += len(label)
print('word error rate:', word_error_num / word_num)
sess.close()
CodePudding user response:
Oh — is no one going to reply?
CodePudding user response:
Bumping this thread.
CodePudding user response:
Could some expert please help me out? Thanks!