I am working on an Android application using TensorFlow Lite. When I run the application and debug it, the app crashes with a java.lang.NoSuchMethodError: No virtual method (the full stack trace is below).
My code:
ClassifierActivity.java
public class ClassifierActivity extends CameraActivity implements OnImageAvailableListener {
private static final boolean MAINTAIN_ASPECT = false;
private static final Size DESIRED_PREVIEW_SIZE = new Size(960, 480);
private static final float TEXT_SIZE_DIP = 10;
private Bitmap rgbFrameBitmap = null;
private Bitmap croppedBitmap = null;
private Bitmap cropCopyBitmap = null;
private long lastProcessingTimeMs;
private Integer sensorOrientation;
private Classifier classifier;
private Matrix frameToCropTransform;
private Matrix cropToFrameTransform;
private BorderedText borderedText;
@Override
protected int getLayoutId() {
return R.layout.camera_connection_fragment;
}
@Override
protected Size getDesiredPreviewFrameSize() {
return DESIRED_PREVIEW_SIZE;
}
@Override
public void onPreviewSizeChosen(final Size size, final int rotation) {
final float textSizePx =
TypedValue.applyDimension(
TypedValue.COMPLEX_UNIT_DIP, TEXT_SIZE_DIP, getResources().getDisplayMetrics());
borderedText = new BorderedText(textSizePx);
borderedText.setTypeface(Typeface.MONOSPACE);
recreateClassifier(getModel(), getDevice(), getNumThreads());
if (classifier == null) {
//LOGGER.e("No classifier on preview!");
return;
}
previewWidth = size.getWidth();
previewHeight = size.getHeight();
sensorOrientation = rotation - getScreenOrientation();
//LOGGER.i("Camera orientation relative to screen canvas: %d", sensorOrientation);
//LOGGER.i("Initializing at size %dx%d", previewWidth, previewHeight);
rgbFrameBitmap = Bitmap.createBitmap(previewWidth, previewHeight, Config.ARGB_8888);
croppedBitmap =
Bitmap.createBitmap(
classifier.getImageSizeX(), classifier.getImageSizeY(), Config.ARGB_8888);
frameToCropTransform =
ImageUtils.getTransformationMatrix(
previewWidth,
previewHeight,
classifier.getImageSizeX(),
classifier.getImageSizeY(),
sensorOrientation,
MAINTAIN_ASPECT);
cropToFrameTransform = new Matrix();
frameToCropTransform.invert(cropToFrameTransform);
}
@Override
protected void processImage() {
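// Copy the latest camera frame into the preview-sized bitmap, then crop/rotate it into the classifier's input bitmap.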
rgbFrameBitmap.setPixels(getRgbBytes(), 0, previewWidth, 0, 0, previewWidth, previewHeight);
final Canvas canvas = new Canvas(croppedBitmap);
canvas.drawBitmap(rgbFrameBitmap, frameToCropTransform, null);
runInBackground(
new Runnable() {
@Override
public void run() {
if (classifier != null) {
final long startTime = SystemClock.uptimeMillis();
final List<Classifier.Recognition> results = classifier.recognizeImage(croppedBitmap);
lastProcessingTimeMs = SystemClock.uptimeMillis() - startTime;
//LOGGER.v("Detect: %s", results);
cropCopyBitmap = Bitmap.createBitmap(croppedBitmap);
runOnUiThread(
new Runnable() {
@Override
public void run() {
showResultsInBottomSheet(results);
showFrameInfo(previewWidth + "x" + previewHeight);
showCropInfo(cropCopyBitmap.getWidth() + "x" + cropCopyBitmap.getHeight());
showCameraResolution(canvas.getWidth() + "x" + canvas.getHeight());
showRotationInfo(String.valueOf(sensorOrientation));
showInference(lastProcessingTimeMs + "ms");
}
});
}
readyForNextImage();
}
});
}
@Override
protected void onInferenceConfigurationChanged() {
if (croppedBitmap == null) {
// Defer creation until we're getting camera frames.
return;
}
final Device device = getDevice();
final Model model = getModel();
final int numThreads = getNumThreads();
runInBackground(() -> recreateClassifier(model, device, numThreads));
}
private void recreateClassifier(Model model, Device device, int numThreads) {
if (classifier != null) {
//LOGGER.d("Closing classifier.");
classifier.close();
classifier = null;
}
if (device == Device.GPU && model == Model.QUANTIZED) {
//LOGGER.d("Not creating classifier: GPU doesn't support quantized models.");
runOnUiThread(
() -> {
Toast.makeText(this, "GPU does not yet support quantized models.", Toast.LENGTH_LONG)
.show();
});
return;
}
try {
//LOGGER.d("Creating classifier (model=%s, device=%s, numThreads=%d)", model, device, numThreads);
classifier = Classifier.create(this, model, device, numThreads);
} catch (IOException e) {
//LOGGER.e(e, "Failed to create classifier.");
}
}
}
Classifier.java
public abstract class Classifier {
public enum Model {
FLOAT,
QUANTIZED,
}
public enum Device {
CPU,
NNAPI,
GPU
}
private final int[] intValues = new int[getImageSizeX() * getImageSizeY()];
private final Interpreter.Options tfliteOptions = new Interpreter.Options();
private MappedByteBuffer tfliteModel;
private List<String> labels;
private GpuDelegate gpuDelegate = null;
protected Interpreter tflite;
protected ByteBuffer imgData = null;
public static Classifier create(Activity activity, Model model, Device device, int numThreads)
throws IOException {
if (model == Model.QUANTIZED) {
return new ClassifierQuantizedMobileNet(activity, device, numThreads);
} else {
return new ClassifierFloatMobileNet(activity, device, numThreads);
}
}
public static class Recognition {
private final String id;
private final String title;
private final Float confidence;
private RectF location;
public Recognition(
final String id, final String title, final Float confidence, final RectF location) {
this.id = id;
this.title = title;
this.confidence = confidence;
this.location = location;
}
public String getId() {
return id;
}
public String getTitle() {
return title;
}
public Float getConfidence() {
return confidence;
}
public RectF getLocation() {
return new RectF(location);
}
public void setLocation(RectF location) {
this.location = location;
}
@Override
public String toString() {
String resultString = "";
if (id != null) {
resultString += "[" + id + "] ";
}
if (title != null) {
resultString += title + " ";
}
if (confidence != null) {
resultString += String.format("(%.1f%%) ", confidence * 100.0f);
}
if (location != null) {
resultString += location + " ";
}
return resultString.trim();
}
}
protected Classifier(Activity activity, Device device, int numThreads) throws IOException {
tfliteModel = loadModelFile(activity);
switch (device) {
case NNAPI:
tfliteOptions.setUseNNAPI(true);
break;
case GPU:
gpuDelegate = new GpuDelegate();
tfliteOptions.addDelegate(gpuDelegate);
break;
case CPU:
break;
}
tfliteOptions.setNumThreads(numThreads);
tflite = new Interpreter(tfliteModel, tfliteOptions);
labels = loadLabelList(activity);
imgData =
ByteBuffer.allocateDirect(
DIM_BATCH_SIZE
* getImageSizeX()
* getImageSizeY()
* DIM_PIXEL_SIZE
* getNumBytesPerChannel());
imgData.order(ByteOrder.nativeOrder());
//LOGGER.d("Created a Tensorflow Lite Image Classifier.");
}
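// Reads the label file from assets, one label per line, in the order of the model's output classes.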
private List<String> loadLabelList(Activity activity) throws IOException {
List<String> labels = new ArrayList<String>();
BufferedReader reader =
new BufferedReader(new InputStreamReader(activity.getAssets().open(getLabelPath())));
String line;
while ((line = reader.readLine()) != null) {
labels.add(line);
}
reader.close();
return labels;
}
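// Memory-maps the model file from assets so it is read directly rather than copied onto the Java heap.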
private MappedByteBuffer loadModelFile(Activity activity) throws IOException {
AssetFileDescriptor fileDescriptor = activity.getAssets().openFd(getModelPath());
FileInputStream inputStream = new FileInputStream(fileDescriptor.getFileDescriptor());
FileChannel fileChannel = inputStream.getChannel();
long startOffset = fileDescriptor.getStartOffset();
long declaredLength = fileDescriptor.getDeclaredLength();
return fileChannel.map(FileChannel.MapMode.READ_ONLY, startOffset, declaredLength);
}
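// Copies the bitmap's pixels into imgData in the input tensor's layout; addPixelValue() applies the model-specific (float or quantized) per-channel conversion.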
private void convertBitmapToByteBuffer(Bitmap bitmap) {
if (imgData == null) {
return;
}
imgData.rewind();
bitmap.getPixels(intValues, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());
// Convert the image to floating point.
int pixel = 0;
long startTime = SystemClock.uptimeMillis();
for (int i = 0; i < getImageSizeX(); ++i) {
for (int j = 0; j < getImageSizeY(); ++j) {
final int val = intValues[pixel++];
addPixelValue(val);
}
}
long endTime = SystemClock.uptimeMillis();
//LOGGER.v("Timecost to put values into ByteBuffer: " (endTime - startTime));
}
}
java.lang.NoSuchMethodError: No virtual method getAccelerationConfig()Lorg/tensorflow/lite/acceleration/ValidatedAccelerationConfig; in class Lorg/tensorflow/lite/InterpreterImpl$Options; or its super classes (declaration of 'org.tensorflow.lite.InterpreterImpl$Options' appears in /data/app/org.tensorflow.lite.examples.detection-w3joIsrUlwpT9hRj_txyLQ==/base.apk!classes12.dex)
at org.tensorflow.lite.NativeInterpreterWrapper.init(NativeInterpreterWrapper.java:80)
at org.tensorflow.lite.NativeInterpreterWrapper.<init>(NativeInterpreterWrapper.java:73)
at org.tensorflow.lite.NativeInterpreterWrapperExperimental.<init>(NativeInterpreterWrapperExperimental.java:36)
at org.tensorflow.lite.Interpreter.<init>(Interpreter.java:224)
at org.tensorflow.lite.examples.detection.Currency.tflite.Classifier.<init>(Classifier.java:175)
at org.tensorflow.lite.examples.detection.Currency.tflite.ClassifierFloatMobileNet.<init>(ClassifierFloatMobileNet.java:41)
at org.tensorflow.lite.examples.detection.Currency.tflite.Classifier.create(Classifier.java:86)
at org.tensorflow.lite.examples.detection.Currency.ClassifierActivity.recreateClassifier(ClassifierActivity.java:166)
at org.tensorflow.lite.examples.detection.Currency.ClassifierActivity.onPreviewSizeChosen(ClassifierActivity.java:70)
at org.tensorflow.lite.examples.detection.Currency.CameraActivity.onPreviewFrame(CameraActivity.java:235)
at android.hardware.Camera$EventHandler.handleMessage(Camera.java:1400)
at android.os.Handler.dispatchMessage(Handler.java:106)
CodePudding user response:
Add this attribute to the <activity> element that you have declared and exported in your manifest file:
android:hardwareAccelerated="true"
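Placed in context, it might look like this; the activity name is derived from the class in your stack trace, and android:exported is shown only because you mentioned the activity is exported, so treat both as illustrative:

<activity
    android:name=".Currency.ClassifierActivity"
    android:exported="true"
    android:hardwareAccelerated="true" />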
CodePudding user response:
Probably your runtime dependencies are different from your compile-time ones: the method getAccelerationConfig() is missing from the version of TensorFlow Lite that actually ships inside your APK, so another artifact (such as the GPU delegate) built against a newer API calls a method that does not exist at runtime. Make sure every TensorFlow Lite dependency in your build resolves to the same release.
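A minimal sketch, assuming the Gradle Groovy DSL and using 2.13.0 purely as an example version (any release works as long as the artifacts match), of aligned dependencies in app/build.gradle:

dependencies {
    // Example versions only: the important part is that the core library and
    // the GPU delegate come from the same TensorFlow Lite release, so the
    // classes packed into the APK agree on Interpreter.Options' methods.
    implementation 'org.tensorflow:tensorflow-lite:2.13.0'
    implementation 'org.tensorflow:tensorflow-lite-gpu:2.13.0'
}

After aligning the versions, do a clean build so no stale TensorFlow Lite classes remain in the packaged dex files.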