Preface

The previous chapter covered packaging the SeetaFace6 library for Android.
This chapter walks through the test project that calls it, built mainly on Android CameraX.

1: Create the Android project
Android Studio -> New Project -> Empty Activity
Project name camerx, package name com.example.camerx, minimum SDK 23 (choose whatever suits your devices)

2: The project's single Activity is as follows

package com.example.camerx;

import androidx.annotation.NonNull;
import androidx.annotation.RequiresApi;
import androidx.appcompat.app.AppCompatActivity;

import android.annotation.SuppressLint;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.content.res.Configuration;
import android.database.Cursor;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.graphics.ImageFormat;
import android.graphics.Matrix;
import android.media.Image;
import android.net.Uri;
import android.os.Build;
import android.os.Bundle;
import android.os.Handler;
import android.os.HandlerThread;
import android.provider.MediaStore;
import android.renderscript.Allocation;
import android.renderscript.Element;
import android.renderscript.RenderScript;
import android.renderscript.ScriptIntrinsicYuvToRGB;
import android.renderscript.Type;
import android.util.Log;
import android.util.TypedValue;
import android.view.Surface;
import android.view.TextureView;
import android.view.View;
import android.view.Window;
import android.view.WindowManager;

//
import androidx.camera.core.CameraSelector;
import androidx.camera.core.CameraX;
import androidx.camera.core.ImageAnalysis;
//import androidx.camera.core.ImageAnalysisConfig;
import androidx.camera.core.ImageCapture;
//import androidx.camera.core.ImageCaptureConfig;
import androidx.camera.core.ImageProxy;
import androidx.camera.core.Preview;
import androidx.camera.lifecycle.ProcessCameraProvider;
import androidx.camera.view.PreviewView;
import androidx.core.app.ActivityCompat;
import androidx.core.content.ContextCompat;
import android.util.Size;
import android.widget.Button;
import android.widget.ImageView;
import android.widget.TextView;
import android.widget.Toast;

import com.example.camerx.databinding.ActivityMainBinding;
import com.google.common.util.concurrent.ListenableFuture;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.time.Instant;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
///

public class MainActivity extends AppCompatActivity {

    private  String TAG = "MainActivity";
    // Runtime permission handling
    private int REQUEST_CODE_PERMISSIONS = 101;
    private final String[] REQUIRED_PERMISSIONS = new String[]{"android.permission.CAMERA", "android.permission.WRITE_EXTERNAL_STORAGE", "android.permission.READ_EXTERNAL_STORAGE"};
    private ImageView imageView,imageView1;
    private Button btn_select_image;
    private static final int SELECT_IMAGE = 1;
    private SeetaFace seetaFace = new SeetaFace(); // SeetaFace wrapper instance (from the previous chapter)
    private static List<String> lst = new ArrayList<String>();  // list of model files bundled in assets
    private TextView  text;
    private Button toggleCamera;
    private boolean isBackCamera =false;
    static {
        lst.add("age_predictor.csta");
        lst.add("eye_state.csta");
        lst.add("face_detector.csta");
        lst.add("face_landmarker_mask_pts5.csta");
        lst.add("face_landmarker_pts5.csta");
        lst.add("face_landmarker_pts68.csta");
        lst.add("fas_first.csta");
        lst.add("fas_second.csta");
        lst.add("gender_predictor.csta");
        lst.add("mask_detector.csta");
        lst.add("post_estimation.csta");
    }
    private ExecutorService cameraExecutor;
    private ActivityMainBinding mViewBinding;
   private PreviewView viewFinder;
  //  public   ImageView imageView ;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        fullscreen();
        mViewBinding = ActivityMainBinding.inflate(getLayoutInflater());
        setContentView(mViewBinding.getRoot());
       // setContentView(R.layout.activity_main);
        imageView = findViewById(R.id.imageView);
        text = findViewById(R.id.text);
        toggleCamera= findViewById(R.id.toggle_camera);
        toggleCamera.setText("toggleCamera");
        toggleCamera.setTextSize(TypedValue.COMPLEX_UNIT_PX,16);
//        imageView1= findViewById(R.id.imageView1);
//        btn_select_image = findViewById(R.id.btn_select_image);
   //     cameraExecutor = Executors.newSingleThreadExecutor();
        cameraExecutor = Executors.newFixedThreadPool(2);
    //    viewFinder = (PreviewView)findViewById(R.id.viewFinder);
        toggleCamera.setOnClickListener(new View.OnClickListener(){
            @Override
            public void onClick(View v) {
                isBackCamera = !isBackCamera;
                startCamera();
            }
        });
        // Check permissions, then start the camera
        if (allPermissionsGranted()) {
          //  startCamera(); // start the camera
            loadSeetaFaceModule();
            startCamera();
        } else {
            ActivityCompat.requestPermissions(this, REQUIRED_PERMISSIONS, REQUEST_CODE_PERMISSIONS);
        }

//        btn_select_image.setOnClickListener(new View.OnClickListener(){
//            @Override
//            public void onClick(View v) {
//                Intent i = new Intent(Intent.ACTION_PICK);   // pick an image from the gallery
//                i.setType("image/*"); // image MIME type
//                startActivityForResult(i, SELECT_IMAGE);  // the SELECT_IMAGE result would be handled in onActivityResult
//            }
//        });

       // setupcamera();
    }

    // Fullscreen
    private void fullscreen(){
        if (getSupportActionBar() != null){   // when extending AppCompatActivity
            getSupportActionBar().hide();  // this call must come after setContentView()
        }else{
            this.requestWindowFeature(Window.FEATURE_NO_TITLE);  // when extending Activity, this call must come before setContentView()
        }
        this.getWindow().setFlags(WindowManager.LayoutParams.FLAG_FULLSCREEN, WindowManager.LayoutParams.FLAG_FULLSCREEN);
    }

    @Override
    protected void onStart() {
        super.onStart();
    }

    private void setupcamera(){
        // Check permissions and start the camera
        if (allPermissionsGranted()) {
            startCamera(); // start the camera
        } else {
            ActivityCompat.requestPermissions(this, REQUIRED_PERMISSIONS, REQUEST_CODE_PERMISSIONS);
        }
    }

    // Check whether all required permissions have been granted
    private boolean allPermissionsGranted(){
        //check if req permissions have been granted
        for(String permission : REQUIRED_PERMISSIONS){
            if(ContextCompat.checkSelfPermission(this, permission) != PackageManager.PERMISSION_GRANTED){
                return false;
            }
        }
        return true;
    }

    @Override
    public void onRequestPermissionsResult(int requestCode, @NonNull String[] permissions, @NonNull int[] grantResults) {
       // super.onRequestPermissionsResult(requestCode, permissions, grantResults);
        if (requestCode == REQUEST_CODE_PERMISSIONS) {

            if (allPermissionsGranted()) {
                loadSeetaFaceModule();
                startCamera();
            } else {
                Toast.makeText(this, "Permissions not granted by the user.", Toast.LENGTH_SHORT).show();
                finish();
            }
        }
    }

    // Start the camera
    @SuppressLint("UnsafeOptInUsageError")
    private void startCamera() {
        // Bind the camera's lifecycle to the Activity (set the lifecycle owner) so the camera does not need to be opened and closed manually
        ListenableFuture<ProcessCameraProvider> cameraProviderFuture = ProcessCameraProvider.getInstance(this);

        cameraProviderFuture.addListener(() -> {
            try {
                // Obtain the ProcessCameraProvider used to bind use cases to the lifecycle owner
                ProcessCameraProvider processCameraProvider = cameraProviderFuture.get();

                // Optionally create a Preview instance and set its surface provider (disabled here)
//                PreviewView viewFinder = (PreviewView)findViewById(R.id.viewFinder);
//                Preview preview = new Preview.Builder()
//                        .build();
//                preview.setSurfaceProvider(viewFinder.getSurfaceProvider());

                // Select the back or front camera depending on the toggle
                CameraSelector cameraSelector = isBackCamera ? CameraSelector.DEFAULT_BACK_CAMERA : CameraSelector.DEFAULT_FRONT_CAMERA;

                // Optionally create an ImageCapture instance (disabled here)
             //   imageCapture = new ImageCapture.Builder().build();

                // Configure the image analysis use case
//                ImageAnalysis imageAnalysis = new ImageAnalysis.Builder()
//                        .build();
                ImageAnalysis imageAnalysis = new ImageAnalysis.Builder()
                        .setOutputImageFormat(ImageAnalysis.OUTPUT_IMAGE_FORMAT_RGBA_8888)
                        .setTargetResolution(new Size(720, 1280)) // preferred analysis resolution
                        .setOutputImageRotationEnabled(true) // rotate the frames delivered to the analyzer
                        .setTargetRotation(Surface.ROTATION_0) // target rotation applied to the analyzed frames
                        .setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
                        .setImageQueueDepth(1)
                        .build();
              //  imageAnalysis.setAnalyzer(cameraExecutor, new MyAnalyzer());
                imageAnalysis.setAnalyzer(cameraExecutor, imageProxy -> {
                    // Process the frame data below
                    // runOnUiThread(() -> Toast.makeText(getApplicationContext(), "got a frame", Toast.LENGTH_SHORT).show());
                    Bitmap bitmap = toBitmap(imageProxy.getImage()); // toBitmap for RGBA_8888 frames; yuv420ToBitmap for the YUV path
                   // Bitmap  bitmapOut = seetaFace_detectEx(bitmap);

                    int nResult = seetaFace_detectEx(bitmap);
                    String str = "sex:" + (nResult &amp;1) + " age="+ (nResult/2) ;
                    runOnUiThread(() ->{imageView.setImageBitmap(bitmap); text.setText(str);});

                    imageProxy.close(); // close the frame last, otherwise the analyzer stops receiving new frames
                });

                // Unbind any existing use cases before rebinding
                processCameraProvider.unbindAll();

                // Bind the use cases to the camera
                processCameraProvider.bindToLifecycle(MainActivity.this, cameraSelector,
                        imageAnalysis);
//                processCameraProvider.bindToLifecycle(MainActivity.this, cameraSelector,
//                        preview,
//                        imageCapture,
//                        imageAnalysis);

            } catch (Exception e) {
                Log.e(TAG, "用例绑定失败!" + e);
            }
        }, ContextCompat.getMainExecutor(this));

    }


//    private ImageAnalysis setImageAnalysis() {
//
//    }

//    public static Bitmap imageProxyToBitmap(ImageProxy imageProxy) {
//        ByteBuffer byteBuffer = imageProxy.getPlanes()[0].getBuffer();
//        byte[] bytes = new byte[byteBuffer.remaining()];
//        byteBuffer.get(bytes);
//        return BitmapFactory.decodeByteArray(bytes,0,bytes.length);
//    }



    // Copy an asset file into the app's cache directory
    private boolean copyAssetAndWrite(String fileName){
        try {
            File cacheDir=getCacheDir();
            if (!cacheDir.exists()){
                cacheDir.mkdirs();
            }
            File outFile =new File(cacheDir,fileName);
            if (!outFile.exists()){
                boolean res=outFile.createNewFile();
                if (!res){
                    return false;
                }
            }else {
                if (outFile.length()>10){ // already copied on a previous run
                    return true;
                }
            }
            InputStream is=getAssets().open(fileName);
            FileOutputStream fos = new FileOutputStream(outFile);
            byte[] buffer = new byte[1024];
            int byteCount;
            while ((byteCount = is.read(buffer)) != -1) {
                fos.write(buffer, 0, byteCount);
            }
            fos.flush();
            is.close();
            fos.close();
            return true;
        } catch (IOException e) {
            e.printStackTrace();
        }

        return false;
    }

    // Load the model files
    private void loadSeetaFaceModule(){
        for(int i=0;i<lst.size();i++){
            boolean mkdir_model = copyAssetAndWrite(lst.get(i));  // copied into the cache directory
        }
        String dataPath = getCacheDir().getPath();
   //     Log.e("model directory: ", dataPath);
        String[] functions={"landmark5","landmark68","live","age","bright","clarity","eyeState","faceMask","mask","gender","resolution","pose","integrity"};
        boolean ret_init = seetaFace.loadModel(dataPath,functions);
        if (ret_init){
            Log.i("loadSeetaFaceModule", "model loading succeeded");
        }
        else
            Log.e("loadSeetaFaceModule", "model loading failed");
    }
    


    // Convert an RGBA_8888 Image with no row padding to a Bitmap
    public static Bitmap toBitmapRGBA2(Image image) {
        Image.Plane[] planes = image.getPlanes();
        ByteBuffer buffer = planes[0].getBuffer();
        Bitmap bitmap = Bitmap.createBitmap(image.getWidth(), image.getHeight(), Bitmap.Config.ARGB_8888);
        buffer.rewind();
        bitmap.copyPixelsFromBuffer(buffer);
        return bitmap;
    }

    // Convert an RGBA_8888 Image to a Bitmap, allowing for row padding
    private Bitmap toBitmap(Image image) {
        Image.Plane[] planes = image.getPlanes();
        ByteBuffer buffer = planes[0].getBuffer();
        int pixelStride = planes[0].getPixelStride();
        int rowStride = planes[0].getRowStride();
        int rowPadding = rowStride - pixelStride * image.getWidth();
        Bitmap bitmap = Bitmap.createBitmap(image.getWidth()+rowPadding/pixelStride,
                image.getHeight(), Bitmap.Config.ARGB_8888);
        bitmap.copyPixelsFromBuffer(buffer);
        return bitmap;
    }

    // Convert a YUV_420_888 Image to a Bitmap via RenderScript (kept as an alternative to the RGBA path above)
    private Bitmap yuv420ToBitmap(Image image) {
        RenderScript rs = RenderScript.create(MainActivity.this);
        ScriptIntrinsicYuvToRGB script = ScriptIntrinsicYuvToRGB.create(rs, Element.U8_4(rs));

        // Refer the logic in a section below on how to convert a YUV_420_888 image
        // to single channel flat 1D array. For sake of this example I'll abstract it
        // as a method.
        byte[] yuvByteArray = image2byteArray(image);

        Type.Builder yuvType = new Type.Builder(rs, Element.U8(rs)).setX(yuvByteArray.length);
        Allocation in = Allocation.createTyped(rs, yuvType.create(), Allocation.USAGE_SCRIPT);

        Type.Builder rgbaType = new Type.Builder(rs, Element.RGBA_8888(rs))
                .setX(image.getWidth())
                .setY(image.getHeight());
        Allocation out = Allocation.createTyped(rs, rgbaType.create(), Allocation.USAGE_SCRIPT);

        // The allocations above "should" be cached if you are going to perform
        // repeated conversion of YUV_420_888 to Bitmap.
        in.copyFrom(yuvByteArray);
        script.setInput(in);
        script.forEach(out);

        Bitmap bitmap = Bitmap.createBitmap(image.getWidth(), image.getHeight(), Bitmap.Config.ARGB_8888);
        out.copyTo(bitmap);
        return bitmap;
    }

    private byte[] image2byteArray(Image image) {
        if (image.getFormat() != ImageFormat.YUV_420_888) {
            throw new IllegalArgumentException("Invalid image format");
        }

        int width = image.getWidth();
        int height = image.getHeight();

        Image.Plane yPlane = image.getPlanes()[0];
        Image.Plane uPlane = image.getPlanes()[1];
        Image.Plane vPlane = image.getPlanes()[2];

        ByteBuffer yBuffer = yPlane.getBuffer();
        ByteBuffer uBuffer = uPlane.getBuffer();
        ByteBuffer vBuffer = vPlane.getBuffer();

        // Full size Y channel and quarter size U+V channels.
        int numPixels = (int) (width * height * 1.5f);
        byte[] nv21 = new byte[numPixels];
        int index = 0;

        // Copy Y channel.
        int yRowStride = yPlane.getRowStride();
        int yPixelStride = yPlane.getPixelStride();
        for(int y = 0; y < height; ++y) {
            for (int x = 0; x < width; ++x) {
                nv21[index++] = yBuffer.get(y * yRowStride + x * yPixelStride);
            }
        }

        // Copy VU data; NV21 format is expected to have YYYYVU packaging.
        // The U/V planes are guaranteed to have the same row stride and pixel stride.
        int uvRowStride = uPlane.getRowStride();
        int uvPixelStride = uPlane.getPixelStride();
        int uvWidth = width / 2;
        int uvHeight = height / 2;

        for(int y = 0; y < uvHeight; ++y) {
            for (int x = 0; x < uvWidth; ++x) {
                int bufferIndex = (y * uvRowStride) + (x * uvPixelStride);
                // V channel.
                nv21[index++] = vBuffer.get(bufferIndex);
                // U channel.
                nv21[index++] = uBuffer.get(bufferIndex);
            }
        }
        return nv21;
    }


    private int seetaFace_detectEx(Bitmap bm){
        int nResult = seetaFace.detectFaceEx(bm, 27); // flag bits: 1 detect, 2 point5, 4 point68, 8 sex, 16 age
        if(nResult < 0) {
            Log.i("seetaFace_detectEx", "seetaFace_detectEx:" + nResult);
        }else{
           // Log.i("seetaFace_detectEx", "sex:" + (nResult & 1) + " age=" + (nResult / 2));

        }
        return  nResult;
    }

}
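The Activity above depends on the SeetaFace wrapper built in the previous chapter, and on the .csta model files listed in lst being bundled under src/main/assets (they are copied out via getAssets() in copyAssetAndWrite()). For readers who only have this chapter open, the calls used above imply roughly the interface sketched below; the method names come straight from the code, but the real signatures and the native library name live in the previous chapter, so treat this as an assumption rather than the actual class.

// Assumed shape of the SeetaFace wrapper, reconstructed from the calls in MainActivity.
package com.example.camerx;

import android.graphics.Bitmap;

public class SeetaFace {
    static {
        System.loadLibrary("seetaface_jni"); // hypothetical native library name
    }

    // Loads the .csta models found in dataPath and enables the listed functions.
    // Returns true on success (see loadSeetaFaceModule()).
    public native boolean loadModel(String dataPath, String[] functions);

    // Runs detection and attribute estimation on the bitmap. Per the comment in
    // seetaFace_detectEx(), the flag bits are: 1 detect, 2 point5, 4 point68, 8 sex, 16 age.
    // Returns a negative value on failure; otherwise a packed result whose low bit
    // is read as sex and whose remaining bits are read as age.
    public native int detectFaceEx(Bitmap bitmap, int flags);
}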

3: The layout (activity_main.xml) is as follows

<?xml version="1.0" encoding="utf-8"?>
<androidx.constraintlayout.widget.ConstraintLayout xmlns:android="http://schemas.android.com/apk/res/android"
    xmlns:app="http://schemas.android.com/apk/res-auto"
    xmlns:tools="http://schemas.android.com/tools"
    android:layout_width="match_parent"
    android:layout_height="match_parent"
    tools:context=".MainActivity">

    <!--androidx.camera.view.PreviewView
        android:id="@+id/viewFinder"
        android:layout_width="match_parent"
        android:layout_height="match_parent" /-->
    <ImageView
        android:id="@+id/imageView"
        android:layout_width="0dp"
        android:layout_height="0dp"
        android:layout_gravity="center"
        app:layout_constraintBottom_toBottomOf="parent"
        app:layout_constraintLeft_toLeftOf="parent"
        app:layout_constraintRight_toRightOf="parent"
        app:layout_constraintTop_toTopOf="parent"/>

    <TextView
        android:id="@+id/text"
        android:layout_width="wrap_content"
        android:layout_height="wrap_content"
        android:layout_centerInParent="true"
        android:background="#000000"
        android:gravity="center"
        android:textColor="#ffffff"
        app:layout_constraintEnd_toStartOf="@+id/imageView"
        app:layout_constraintHorizontal_bias="0.0"
        app:layout_constraintStart_toStartOf="parent"
        tools:ignore="MissingConstraints"
        tools:layout_editor_absoluteY="0dp" />

    <Button
        android:id="@+id/toggle_camera"
        android:layout_width="120dp"
        android:layout_height="35dp"
        android:layout_marginRight="16dp"
        android:layout_marginBottom="16px"
        android:layout_gravity="bottom"
        app:layout_constraintRight_toRightOf="parent"
        app:layout_constraintBottom_toBottomOf="parent" />


</androidx.constraintlayout.widget.ConstraintLayout>

4: Add three permissions to AndroidManifest.xml

<uses-permission android:name="android.permission.CAMERA"/>
    <uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE"/>
    <uses-permission android:name="android.permission.READ_EXTERNAL_STORAGE"/>

5: build.gradle is as follows
Fill in the signing config (signingConfigs) with your own keystore details.

plugins {
    id 'com.android.application'
}

android {
    compileSdkVersion 31
    buildToolsVersion '30.0.3'

    defaultConfig {
        applicationId "com.example.camerx"
        minSdkVersion 23
        targetSdkVersion 30
        versionCode 1
        versionName "1.0"

        testInstrumentationRunner "androidx.test.runner.AndroidJUnitRunner"

        ndk {
            abiFilters 'armeabi-v7a', 'arm64-v8a'
        }
    }
    // Signing configuration
    signingConfigs {
        release {
            storeFile file('****')   // set your own keystore path
            storePassword '****'     // set your own store password
            keyAlias '****'          // set your own key alias
            keyPassword '****'       // set your own key password
            //           v2SigningEnabled false
        }
    }

    buildTypes {
        release {
            minifyEnabled false
            proguardFiles getDefaultProguardFile('proguard-android-optimize.txt'), 'proguard-rules.pro'
            signingConfig signingConfigs.release // without this line the release build is packaged unsigned
            //        A Jenkins command-line Gradle build once produced an APK that would not install; adb install reported INSTALL_PARSE_FAILED_NO_CERTIFICATES,
            //        meaning the APK was unsigned. Rename the .apk to .zip and check META-INF for CERT.RSA; if it is missing, the APK has no signature.
        }
    }
    compileOptions {
        sourceCompatibility JavaVersion.VERSION_1_8
        targetCompatibility JavaVersion.VERSION_1_8
    }
    //eh add
    sourceSets{
        main{
            jniLibs.srcDirs = ['src/main/libs'] // directory holding the SeetaFace .so files
        }
    }

    buildFeatures {
        viewBinding true
    }
}

dependencies {

    implementation 'androidx.appcompat:appcompat:1.1.0'
    implementation 'com.google.android.material:material:1.1.0'
    implementation 'androidx.constraintlayout:constraintlayout:1.1.3'
    testImplementation 'junit:junit:4.+'
    androidTestImplementation 'androidx.test.ext:junit:1.1.1'
    androidTestImplementation 'androidx.test.espresso:espresso-core:3.2.0'

    def camerax_version = "1.1.0-beta03"
// CameraX core library
    implementation "androidx.camera:camera-core:$camerax_version"
// CameraX Camera2 library (the Camera2 backend implementation)
    implementation "androidx.camera:camera-camera2:$camerax_version"
// CameraX Lifecycle library [optional]: avoids releasing and tearing down the camera manually across lifecycle events
    implementation "androidx.camera:camera-lifecycle:$camerax_version"
// CameraX View classes [optional]: PreviewView is recommended; it decides internally whether to back the preview with SurfaceView or TextureView
    implementation "androidx.camera:camera-view:$camerax_version"

// CameraX Extensions [optional]: vendor effects such as portrait, HDR, night, beauty and filters, dependent on OEM support
    implementation "androidx.camera:camera-extensions:${camerax_version}"

   // implementation fileTree(include: ['*.jar'], dir: 'libs')
}

6: Build the project and test on a phone

Tested with a picture found online; the app detects the face and displays gender and age.
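The gender and age shown on screen come from the packed integer returned by detectFaceEx. A small helper that makes that decoding explicit could look like the sketch below; the bit layout (sex in the low bit, age in the remaining bits) is inferred from the expressions nResult & 1 and nResult / 2 in MainActivity, so treat it as an assumption rather than documented behaviour.

// Hypothetical helper mirroring how MainActivity interprets detectFaceEx() results.
public final class DetectResult {
    public final boolean detected; // false when the native call returned a negative code
    public final int sex;          // low bit of the result; its meaning is defined by the native library
    public final int age;          // remaining bits, read as the estimated age

    private DetectResult(boolean detected, int sex, int age) {
        this.detected = detected;
        this.sex = sex;
        this.age = age;
    }

    public static DetectResult fromCode(int nResult) {
        if (nResult < 0) {
            return new DetectResult(false, -1, -1); // negative codes signal failure
        }
        return new DetectResult(true, nResult & 1, nResult / 2);
    }
}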

7: That wraps up the project

Real-time recognition on the live video feed feels a little laggy; the next chapter will continue optimizing it.
The complete demo project has been uploaded for reference.
If you found this useful, please give it a like.

Original article: https://blog.csdn.net/yunteng521/article/details/126007964
