Android 浏览图像和人脸检测
我在检测浏览图像的人脸时遇到了一些问题。我知道问题是我不知道如何在导入的图像上应用我正在测试的人脸检测代码。我正在测试的示例代码是为本地存储的图像编写的。我相信我很接近，但你能帮我吗？首先，我创建了一个 gallery 方法：
/**
 * Opens the system content picker so the user can choose an image from the
 * gallery. The chosen image's Uri is delivered to onActivityResult with
 * request code 1.
 */
protected void gallery() {
    // Use the documented constant instead of the raw string
    // "android.intent.action.GET_CONTENT" — same value, no typo risk.
    Intent intent = new Intent(Intent.ACTION_GET_CONTENT);
    intent.setType("image/*");
    startActivityForResult(Intent.createChooser(intent, "Choose An Image"), 1);
}
我仍在学习意图之类的东西,但据我所知,我需要使用意图来使用Android的gallery,因为我设置动作来获取内容,所以我也在使用意图来向它传递信息。话虽如此,我试图将意图中的信息传递给uri。这就是我接下来做的
/**
 * Receives the image picked by gallery() (request code 1), decodes it and
 * shows it in the ImageView.
 *
 * Fixes over the original: the InputStream is now always closed (it leaked
 * before), and a null decode result no longer causes an NPE.
 */
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent intent) {
    super.onActivityResult(requestCode, resultCode, intent);
    if (requestCode == 1 && resultCode == RESULT_OK) {
        Uri uri = intent.getData();
        InputStream is = null;
        try {
            is = getContentResolver().openInputStream(uri);
            Bitmap bitmap = BitmapFactory.decodeStream(is);
            if (bitmap != null) { // decodeStream returns null on failure
                ImageView image = (ImageView) findViewById(R.id.img_view);
                image.setImageBitmap(bitmap);
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            if (is != null) {
                try { is.close(); } catch (Exception ignored) { /* best-effort close */ }
            }
        }
    }
}
这是让我困惑的部分。我猜InputStream有图像信息吗?我试着在同一个try catch中应用人脸检测代码。我认为在image.setImageBitmap(位图)完成后,就是应用人脸检测的时间了。这是人脸检测代码
/**
 * Receives the picked image, displays it, and runs face detection on it.
 *
 * Fixes over the original: it called
 * BitmapFactory.decodeResource(getResources(), R.id.img_view, options),
 * which treats a *view id* as a drawable resource and therefore never decodes
 * the picked image. Instead we decode the picker's stream once, in RGB_565
 * (the only config FaceDetector.findFaces accepts), and detect on that same
 * bitmap. The InputStream is also closed now.
 */
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent intent) {
    super.onActivityResult(requestCode, resultCode, intent);
    if (requestCode == 1 && resultCode == RESULT_OK) {
        Uri uri = intent.getData();
        InputStream is = null;
        try {
            // FaceDetector requires an RGB_565 bitmap, so ask for it up front.
            BitmapFactory.Options options = new BitmapFactory.Options();
            options.inPreferredConfig = Bitmap.Config.RGB_565;
            is = getContentResolver().openInputStream(uri);
            Bitmap bitmap = BitmapFactory.decodeStream(is, null, options);
            if (bitmap == null) {
                return; // decode failed; nothing to show or detect
            }
            ImageView image = (ImageView) findViewById(R.id.image_view);
            image.setImageBitmap(bitmap);
            imageWidth = bitmap.getWidth();
            imageHeight = bitmap.getHeight();
            detectedFaces = new FaceDetector.Face[NUM_FACES];
            faceDetector = new FaceDetector(imageWidth, imageHeight, NUM_FACES);
            NUM_FACE_DETECTED = faceDetector.findFaces(bitmap, detectedFaces);
            mIL.invalidate(); // redraw so onDraw can outline the faces
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            if (is != null) {
                try { is.close(); } catch (Exception ignored) { /* best-effort close */ }
            }
        }
    }
}
我不知道如何将“mFaceBitmap=BitmapFactory.decodeResource(getResources(),R.drawable.smilingfaces,options)”;“这是用于本地图像的,更改为我认为存储在InputStream中的图像(或者是它吗?所选图像在哪里?)。我想出了一个主意,改为进行imageView布局,因为图像在布局中。我不明白所有的转移和转移是如何协同工作的。不管怎样,该代码段被假定为检测人脸。然后onDraw()在检测到的面周围绘制正方形。我不知道该把它放在哪里,但我把它放在了onActivityResult()之外
protected void onDraw(Canvas canvas) {
    Paint myPaint = new Paint();
    myPaint.setColor(Color.RED);
    myPaint.setStyle(Paint.Style.STROKE);
    myPaint.setStrokeWidth(3);
    myPaint.setDither(true);
    for (int count = 0; count < NUM_FACE_DETECTED; count++) {
        // ……（原文在此被截断：此处应围绕 detectedFaces[count] 画一个方框）
    }
}
有什么建议吗?我很快就要让它工作了 我明白你真正想要什么。我将为您编写完整的代码,然后继续 在这段代码中,我在layout中创建了一个imageview,有两个类,一个是activity类,另一个是imageview类 我将创建两个按钮,其中一个按钮用于从gallery中选择图像并显示它(用于人脸检测),另一个按钮用于检测所选图像上的人脸 首先是mainlayout.xml
<?xml version="1.0" encoding="utf-8"?>
<!-- Main layout: the custom face-detection view fills the screen and a
     horizontal row of two image "buttons" is overlaid along the top edge. -->
<FrameLayout xmlns:android="http://schemas.android.com/apk/res/android"
android:layout_width="fill_parent"
android:layout_height="fill_parent" >
<!-- Custom View (MyView.java) that displays the picked bitmap and runs
     face detection on it. -->
<com.simpleapps.facedetection.MyView
android:id="@+id/faceview"
android:layout_width="fill_parent"
android:layout_height="fill_parent"
/>
<!-- Button row overlaying the view; both children share the width equally
     via layout_weight="1". -->
<LinearLayout
android:orientation="horizontal"
android:layout_width="fill_parent"
android:layout_height="fill_parent"
android:layout_gravity="top">
<!-- Tapping this opens the gallery picker (see MainActivity). -->
<ImageView
android:id="@+id/gallery"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:layout_marginRight="10dp"
android:layout_weight="1"
android:background="@drawable/gallery" />
<!-- Tapping this runs face detection on the displayed image. -->
<ImageView
android:id="@+id/detectf"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:layout_marginRight="10dp"
android:layout_weight="1"
android:background="@drawable/detect" />
</LinearLayout>
</FrameLayout>
现在是视图类
MyView.java
/**
 * Custom view that draws a bitmap scaled to the device screen width and can
 * run the legacy android.media.FaceDetector over that scaled bitmap.
 *
 * NOTE(review): FaceDetector.findFaces() requires the bitmap to be in
 * RGB_565 config and to have an even width — confirm the decode path used by
 * the Activity honours this.
 */
public class MyView extends View {
    private FaceDetector.Face[] detectedFaces;
    private int NUMBER_OF_FACES = 10;        // max faces FaceDetector may report
    private FaceDetector faceDetector;
    private int NUMBER_OF_FACE_DETECTED;
    private FaceDetector.Face face1;         // first detected face (was never declared)
    private float eyeDistance;
    public Paint myPaint;
    public Bitmap resultBmp;                 // scaled copy actually drawn and scanned
    public Bitmap myBitmap, HairBitmap;
    public PointF midPoint1;
    private int w, h;                        // source bitmap size (were never declared)
    private float x, y;                      // last touch position (were never declared)

    public MyView(Context context, AttributeSet attrs) {
        super(context, attrs);
        // The BitmapFactory.Options built here in the original was never
        // used, so it has been removed; decoding happens in the Activity.
    }

    /** Replaces the displayed bitmap and schedules a redraw. */
    public void setImage(Bitmap bitmap) {
        myBitmap = bitmap;
        invalidate();
    }

    /**
     * Runs face detection over the scaled bitmap produced by onDraw() and
     * reports the number of faces found via a Toast.
     */
    public void facedetect() {
        if (resultBmp == null) {
            // onDraw() has not produced a scaled bitmap yet (no image set);
            // the original crashed with an NPE here.
            Toast.makeText(getContext(), "no faces detected", Toast.LENGTH_LONG).show();
            return;
        }
        myPaint = new Paint();
        myPaint.setColor(Color.GREEN);
        myPaint.setStyle(Paint.Style.STROKE);
        myPaint.setStrokeWidth(3);
        detectedFaces = new FaceDetector.Face[NUMBER_OF_FACES];
        faceDetector = new FaceDetector(resultBmp.getWidth(), resultBmp.getHeight(), NUMBER_OF_FACES);
        NUMBER_OF_FACE_DETECTED = faceDetector.findFaces(resultBmp, detectedFaces);
        System.out.println("faces detected are" + NUMBER_OF_FACE_DETECTED);
        // Remember the first face's midpoint and eye distance for drawing.
        for (int count = 0; count < NUMBER_OF_FACE_DETECTED; count++) {
            if (count == 0) {
                face1 = detectedFaces[count];
                midPoint1 = new PointF();
                face1.getMidPoint(midPoint1);
                eyeDistance = face1.eyesDistance();
            }
        }
        invalidate();
        if (NUMBER_OF_FACE_DETECTED == 0) {
            Toast.makeText(getContext(), "no faces detected", Toast.LENGTH_LONG).show();
        } else {
            Toast.makeText(getContext(), "faces detected " + NUMBER_OF_FACE_DETECTED, Toast.LENGTH_LONG).show();
        }
    }

    @Override
    protected void onDraw(Canvas canvas) {
        if (myBitmap != null) {
            w = myBitmap.getWidth();
            h = myBitmap.getHeight();
            // Scale to full screen width, preserving the aspect ratio, and
            // centre vertically.
            int widthofBitMap = MainActivity.screenWidth;
            int heightofBitMap = widthofBitMap * h / w;
            resultBmp = Bitmap.createScaledBitmap(myBitmap, widthofBitMap, heightofBitMap, true);
            canvas.drawBitmap(resultBmp,
                    (MainActivity.screenWidth - widthofBitMap) / 2,
                    (MainActivity.screenHeight - heightofBitMap) / 2, null);
        }
    }

    @Override
    public boolean onTouchEvent(MotionEvent event) {
        switch (event.getAction()) {
            case MotionEvent.ACTION_DOWN:
            case MotionEvent.ACTION_MOVE:
                x = event.getX();
                y = event.getY();
                break;
            default:
                break;
        }
        invalidate();
        return true;
    }
}
public class MyView extends View {
    private FaceDetector.Face[] detectedFaces;
    private int NUMBER_OF_FACES = 10;
    private FaceDetector faceDetector;
    private int NUMBER_OF_FACE_DETECTED;
    private float eyeDistance;
    public Paint myPaint;
    public Bitmap resultBmp;
    public Bitmap myBitmap, HairBitmap;
    public PointF midPoint1;
    public MyView(Context context, AttributeSet attrs) {
        super(context, attrs);
        // TODO Auto-generated constructor stub
        BitmapFactory.Options bitmapFatoryOptions = new BitmapFactory.Options();
        bitmapFatoryOptions.inPreferredConfig = Bitmap.Config.RGB_565;
    }
    public void setImage(Bitmap bitmap) {
        myBitmap = bitmap;
        invalidate();
    }
    public void facedetect() {
        myPaint = new Paint();
        myPaint.setColor(Color.GREEN);
        myPaint.setStyle(Paint.Style.STROKE);
        myPaint.setStrokeWidth(3);
        detectedFaces = new FaceDetector.Face[NUMBER_OF_FACES];
        faceDetector = new FaceDetector(resultBmp.getWidth(), resultBmp.getHeight(), NUMBER_OF_FACES);
        NUMBER_OF_FACE_DETECTED = faceDetector.findFaces(resultBmp, detectedFaces);
        System.out.println("faces detected are" + NUMBER_OF_FACE_DETECTED);
        Canvas facec = new Canvas();
        for (int count = 0; count < NUMBER_OF_FACE_DETECTED; count++) { /* ……原文在此被截断 */ }

（评论区）我发现了一件事。我的 onDraw 没有被调用，所以我添加了 invalidate()；另外，我有一个想法：因为我在 ImageView 中显示选定的图像，所以我可以执行 bitmap = BitmapFactory.decodeResource(getResources(), R.id.image_view, options)。这些是好的步骤吗？仍然不起作用。如果我不扩展 ImageView，我不相信 onDraw 有效？或者这有关系吗？谢谢！让我看一下，希望这能解决我的问题。这不是一个解决方案：您的代码示例存在许多问题——布局不存在、没有实现 onClickListener、缺少变量、缺少方法。我试图提取一些可能有用的信息，但我仍然在原地踏步。无论如何，谢谢。——是的，这正是我要问的……我知道有错误，代码是从我的应用里摘出来贴在这里的，所以你可能会发现很多错误；有什么问题告诉我，我会帮你清理。@SandeepR 您好，请您指导我在 Android 10 上如何实现：我想将用户面部与本地存储的文件/图像进行比较。
/**
 * Hosts the face-detection view plus two image buttons: one opens the
 * gallery picker, the other runs detection on the currently shown image.
 *
 * Fixes over the original: the fields screenWidth/screenHeight, myGallery,
 * gallery, detectf and imageURI were referenced but never declared; the
 * picker's InputStream leaked; cancelling the picker called System.exit(0),
 * killing the whole app on a normal back press.
 */
public class MainActivity extends Activity {
    public MyView faceview;
    public static Bitmap defaultBitmap;
    // Screen size, read by MyView.onDraw to scale the bitmap (were undeclared).
    public static int screenWidth, screenHeight;
    private LinearLayout myGallery;          // was undeclared
    private ImageView gallery, detectf;      // were undeclared
    private Uri imageURI;                    // was undeclared

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        requestWindowFeature(Window.FEATURE_NO_TITLE);
        getWindow().setFlags(WindowManager.LayoutParams.FLAG_FULLSCREEN,
                WindowManager.LayoutParams.FLAG_FULLSCREEN);
        setRequestedOrientation(ActivityInfo.SCREEN_ORIENTATION_PORTRAIT);
        setContentView(R.layout.activity_main);
        DisplayMetrics displaymetrics = new DisplayMetrics();
        getWindowManager().getDefaultDisplay().getMetrics(displaymetrics);
        screenHeight = displaymetrics.heightPixels;
        screenWidth = displaymetrics.widthPixels;
        faceview = (MyView) findViewById(R.id.faceview);
        // NOTE(review): R.id.mygallery is not present in the posted layout —
        // confirm it exists, otherwise this is null.
        myGallery = (LinearLayout) findViewById(R.id.mygallery);
        gallery = (ImageView) findViewById(R.id.gallery);
        detectf = (ImageView) findViewById(R.id.detectf);
        // FaceDetector only works on RGB_565 bitmaps, so decode the default
        // image in that config.
        BitmapFactory.Options bitmapFatoryOptions = new BitmapFactory.Options();
        bitmapFatoryOptions.inPreferredConfig = Bitmap.Config.RGB_565;
        defaultBitmap = BitmapFactory.decodeResource(getResources(), R.drawable.face, bitmapFatoryOptions);
        faceview.setImage(defaultBitmap);
        gallery.setOnClickListener(new OnClickListener() {
            public void onClick(View v) {
                // Open the system picker; result arrives with request code 0.
                Intent intent = new Intent(Intent.ACTION_GET_CONTENT);
                intent.setType("image/*");
                startActivityForResult(intent, 0);
            }
        });
        detectf.setOnClickListener(new OnClickListener() {
            public void onClick(View v) {
                faceview.facedetect();
            }
        });
    }

    @Override
    public void onActivityResult(int requestCode, int resultCode, Intent data) {
        super.onActivityResult(requestCode, resultCode, data);
        if (resultCode == Activity.RESULT_OK) {
            if (requestCode == 0) {
                imageURI = data.getData();
                InputStream in = null;
                try {
                    BitmapFactory.Options bitmapFatoryOptions = new BitmapFactory.Options();
                    bitmapFatoryOptions.inPreferredConfig = Bitmap.Config.RGB_565;
                    in = getContentResolver().openInputStream(imageURI);
                    Bitmap b = BitmapFactory.decodeStream(in, null, bitmapFatoryOptions);
                    if (b != null) {
                        // setImage() also invalidates the view, so the extra
                        // invalidate() calls of the original are unnecessary.
                        faceview.setImage(b);
                    }
                } catch (FileNotFoundException e) {
                    e.printStackTrace();
                } finally {
                    if (in != null) {
                        try { in.close(); } catch (Exception ignored) { /* best-effort close */ }
                    }
                }
            }
        } else {
            // Do NOT System.exit(0) here — the user pressing back in the
            // picker is a normal cancellation, not a fatal error.
            Log.e("result", "BAD");
        }
    }
}
/**
 * Custom view that draws a bitmap scaled to the device screen width and can
 * run the legacy android.media.FaceDetector over that scaled bitmap.
 *
 * NOTE(review): FaceDetector.findFaces() requires the bitmap to be in
 * RGB_565 config and to have an even width — confirm the decode path used by
 * the Activity honours this.
 */
public class MyView extends View {
    private FaceDetector.Face[] detectedFaces;
    private int NUMBER_OF_FACES = 10;        // max faces FaceDetector may report
    private FaceDetector faceDetector;
    private int NUMBER_OF_FACE_DETECTED;
    private FaceDetector.Face face1;         // first detected face (was never declared)
    private float eyeDistance;
    public Paint myPaint;
    public Bitmap resultBmp;                 // scaled copy actually drawn and scanned
    public Bitmap myBitmap, HairBitmap;
    public PointF midPoint1;
    private int w, h;                        // source bitmap size (were never declared)
    private float x, y;                      // last touch position (were never declared)

    public MyView(Context context, AttributeSet attrs) {
        super(context, attrs);
        // The BitmapFactory.Options built here in the original was never
        // used, so it has been removed; decoding happens in the Activity.
    }

    /** Replaces the displayed bitmap and schedules a redraw. */
    public void setImage(Bitmap bitmap) {
        myBitmap = bitmap;
        invalidate();
    }

    /**
     * Runs face detection over the scaled bitmap produced by onDraw() and
     * reports the number of faces found via a Toast.
     */
    public void facedetect() {
        if (resultBmp == null) {
            // onDraw() has not produced a scaled bitmap yet (no image set);
            // the original crashed with an NPE here.
            Toast.makeText(getContext(), "no faces detected", Toast.LENGTH_LONG).show();
            return;
        }
        myPaint = new Paint();
        myPaint.setColor(Color.GREEN);
        myPaint.setStyle(Paint.Style.STROKE);
        myPaint.setStrokeWidth(3);
        detectedFaces = new FaceDetector.Face[NUMBER_OF_FACES];
        faceDetector = new FaceDetector(resultBmp.getWidth(), resultBmp.getHeight(), NUMBER_OF_FACES);
        NUMBER_OF_FACE_DETECTED = faceDetector.findFaces(resultBmp, detectedFaces);
        System.out.println("faces detected are" + NUMBER_OF_FACE_DETECTED);
        // Remember the first face's midpoint and eye distance for drawing.
        for (int count = 0; count < NUMBER_OF_FACE_DETECTED; count++) {
            if (count == 0) {
                face1 = detectedFaces[count];
                midPoint1 = new PointF();
                face1.getMidPoint(midPoint1);
                eyeDistance = face1.eyesDistance();
            }
        }
        invalidate();
        if (NUMBER_OF_FACE_DETECTED == 0) {
            Toast.makeText(getContext(), "no faces detected", Toast.LENGTH_LONG).show();
        } else {
            Toast.makeText(getContext(), "faces detected " + NUMBER_OF_FACE_DETECTED, Toast.LENGTH_LONG).show();
        }
    }

    @Override
    protected void onDraw(Canvas canvas) {
        if (myBitmap != null) {
            w = myBitmap.getWidth();
            h = myBitmap.getHeight();
            // Scale to full screen width, preserving the aspect ratio, and
            // centre vertically.
            int widthofBitMap = MainActivity.screenWidth;
            int heightofBitMap = widthofBitMap * h / w;
            resultBmp = Bitmap.createScaledBitmap(myBitmap, widthofBitMap, heightofBitMap, true);
            canvas.drawBitmap(resultBmp,
                    (MainActivity.screenWidth - widthofBitMap) / 2,
                    (MainActivity.screenHeight - heightofBitMap) / 2, null);
        }
    }

    @Override
    public boolean onTouchEvent(MotionEvent event) {
        switch (event.getAction()) {
            case MotionEvent.ACTION_DOWN:
            case MotionEvent.ACTION_MOVE:
                x = event.getX();
                y = event.getY();
                break;
            default:
                break;
        }
        invalidate();
        return true;
    }
}