In this post I'd like to share how to implement face recognition with OpenCV and Java. I hope you get something out of it; let's dig in!
The details are as follows.
Download the official installer. Taking Windows 7 as an example, download opencv-2.4.13.3-vc14.exe.
After installation, grab opencv-2413.jar from the build directory D:\opencv\build\java and copy it into your project directory.
You also need the native DLL and the individual cascade XML files, which detect different features (frontal face, profile face, eyes, and so on).
DLL path: D:\opencv\build\java\x64\opencv_java2413.dll
XML path: D:\opencv\sources\data\haarcascades\haarcascade_frontalface_alt.xml (this directory contains the various cascade files)
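Before building the full demo, it can help to verify that the DLL and the cascade file actually load. Below is a minimal sketch assuming the paths listed above; the class name LoadCheck and the printouts are only for illustration.

import org.opencv.core.Core;
import org.opencv.objdetect.CascadeClassifier;

public class LoadCheck {
    public static void main(String[] args) {
        // Load the JNI wrapper DLL by absolute path (path assumed from the setup above)
        System.load("D:\\opencv\\build\\java\\x64\\opencv_java2413.dll");
        // Alternatively, if the DLL directory is already on java.library.path:
        // System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        System.out.println("native library name: " + Core.NATIVE_LIBRARY_NAME);

        // empty() returns true when the cascade XML could not be read
        CascadeClassifier face = new CascadeClassifier(
                "D:\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_alt.xml");
        System.out.println("cascade loaded: " + !face.empty());
    }
}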
Project structure:
The code: the OpenCV DLL must be available at runtime, so either place it on the java.library.path, drop it into the JRE's lib directory (on Windows, System32 also works), or load it dynamically in code, as shown below:
package opencv;

import org.opencv.core.*;
import org.opencv.core.Point;
import org.opencv.highgui.Highgui;
import org.opencv.imgproc.Imgproc;
import org.opencv.objdetect.CascadeClassifier;

import javax.imageio.ImageIO;
import javax.swing.*;
import java.awt.*;
import java.awt.image.BufferedImage;
import java.io.File;
import java.io.IOException;
import java.util.Vector;

/**
 * Created by Administrator on 2017/8/17.
 */
public class Test {

    static {
        // Load the OpenCV native library from the project directory
        String opencvpath = System.getProperty("user.dir") + "\\opencv\\x64\\";
        String libPath = System.getProperty("java.library.path");
        String a = opencvpath + Core.NATIVE_LIBRARY_NAME + ".dll";
        System.load(opencvpath + Core.NATIVE_LIBRARY_NAME + ".dll");
    }

    // Derive the output path for a cropped image: foo.jpg -> fooCut.jpg
    public static String getCutPath(String filePath) {
        String[] splitPath = filePath.split("\\.");
        return splitPath[0] + "Cut" + "." + splitPath[1];
    }

    public static void process(String original, String target) throws Exception {
        String originalCut = getCutPath(original);
        String targetCut = getCutPath(target);
        if (detectFace(original, originalCut) && detectFace(target, targetCut)) {
        }
    }

    public static boolean detectFace(String imagePath, String outFile) throws Exception {
        System.out.println("\nRunning DetectFaceDemo");
        // Create a face detector from haarcascade_frontalface_alt.xml,
        // which ships with the OpenCV installation
        CascadeClassifier faceDetector = new CascadeClassifier(
                "C:\\Users\\Administrator\\Desktop\\opencv\\haarcascade_frontalface_alt.xml");
        Mat image = Highgui.imread(imagePath);

        // Detect faces in the image
        MatOfRect faceDetections = new MatOfRect();
        faceDetector.detectMultiScale(image, faceDetections);
        System.out.println(String.format("Detected %s faces", faceDetections.toArray().length));

        Rect[] rects = faceDetections.toArray();
        if (rects.length == 0) {
            throw new RuntimeException("no face detected");
        }
        if (rects.length > 1) {
            throw new RuntimeException("more than one face detected");
        }

        // Draw a rectangle around the detected face
        Rect rect = rects[0];
        Core.rectangle(image, new Point(rect.x - 2, rect.y - 2),
                new Point(rect.x + rect.width, rect.y + rect.height),
                new Scalar(0, 255, 0));

        // Crop the face, resize it to 300x300 and save it
        Mat sub = image.submat(rect);
        Mat mat = new Mat();
        Size size = new Size(300, 300);
        Imgproc.resize(sub, mat, size);
        return Highgui.imwrite(outFile, mat);

        // Save the annotated image to a file
        // String filename = "C:\\Users\\Administrator\\Desktop\\opencv\\faceDetection.png";
        // System.out.println(String.format("Writing %s", filename));
        // Highgui.imwrite(filename, image);
    }

    /**
     * Extra test: read an image and render it semi-transparent.
     */
    public static void setAlpha(String imagePath, String outFile) {
        try {
            ImageIcon imageIcon = new ImageIcon(imagePath);
            BufferedImage bufferedImage = new BufferedImage(imageIcon.getIconWidth(),
                    imageIcon.getIconHeight(), BufferedImage.TYPE_4BYTE_ABGR);
            Graphics2D g2D = (Graphics2D) bufferedImage.getGraphics();
            g2D.drawImage(imageIcon.getImage(), 0, 0, imageIcon.getImageObserver());

            // Loop over every pixel and change its alpha value
            int alpha = 100;
            for (int j1 = bufferedImage.getMinY(); j1 < bufferedImage.getHeight(); j1++) {
                for (int j2 = bufferedImage.getMinX(); j2 < bufferedImage.getWidth(); j2++) {
                    int rgb = bufferedImage.getRGB(j2, j1);
                    rgb = ((alpha + 1) << 24) | (rgb & 0x00ffffff);
                    bufferedImage.setRGB(j2, j1, rgb);
                }
            }
            g2D.drawImage(bufferedImage, 0, 0, imageIcon.getImageObserver());

            // Write the result as a PNG
            ImageIO.write(bufferedImage, "png", new File(outFile));
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    private static void watermark(String a, String b, String outFile, float alpha) throws IOException {
        // Read the base image
        BufferedImage buffImg = ImageIO.read(new File(a));
        // Read the overlay image
        BufferedImage waterImg = ImageIO.read(new File(b));
        // Create a Graphics2D object for drawing on the base image
        Graphics2D g2d = buffImg.createGraphics();
        int waterImgWidth = waterImg.getWidth();   // width of the overlay
        int waterImgHeight = waterImg.getHeight(); // height of the overlay
        // Blend the overlay into the base image with the given transparency
        g2d.setComposite(AlphaComposite.getInstance(AlphaComposite.SRC_ATOP, alpha));
        // Draw the overlay
        g2d.drawImage(waterImg, 0, 0, waterImgWidth, waterImgHeight, null);
        g2d.dispose(); // Release the resources used by the graphics context
        // Write the result as a PNG
        ImageIO.write(buffImg, "png", new File(outFile));
    }

    // Merge two images
    public static boolean mergeSimple(BufferedImage image1, BufferedImage image2,
                                      int posw, int posh, File fileOutput) {
        int w1 = image1.getWidth();
        int h2 = image1.getHeight();
        int w2 = image2.getWidth();
        int h3 = image2.getHeight();
        BufferedImage imageSaved = new BufferedImage(w1, h2, BufferedImage.TYPE_INT_ARGB);
        Graphics2D g2d = imageSaved.createGraphics();
        // The following lines make the background transparent
        g2d.drawImage(image1, null, 0, 0);
        image1 = g2d.getDeviceConfiguration().createCompatibleImage(w1, w2, Transparency.TRANSLUCENT);
        g2d.dispose();
        g2d = image1.createGraphics();
        // End of the background-transparency code

        // for (int i = 0; i < w2; i++) {
        //     for (int j = 0; j < h3; j++) {
        //         int rgb1 = image1.getRGB(i + posw, j + posh);
        //         int rgb2 = image2.getRGB(i, j);
        //         if (rgb1 != rgb2) {
        //             // rgb2 = rgb1 & rgb2;
        //         }
        //         imageSaved.setRGB(i + posw, j + posh, rgb2);
        //     }
        // }

        boolean b = false;
        try {
            b = ImageIO.write(imageSaved, "png", fileOutput);
        } catch (IOException ie) {
            ie.printStackTrace();
        }
        return b;
    }

    public static void main(String[] args) throws Exception {
        String a, b, c, d;
        a = "C:\\Users\\Administrator\\Desktop\\opencv\\zzl.jpg";
        d = "C:\\Users\\Administrator\\Desktop\\opencv\\cgx.jpg";
        // process(a, d);
        a = "C:\\Users\\Administrator\\Desktop\\opencv\\zzlCut.jpg";
        d = "C:\\Users\\Administrator\\Desktop\\opencv\\cgxCut.jpg";

        CascadeClassifier faceDetector = new CascadeClassifier(
                "C:\\Users\\Administrator\\Desktop\\opencv\\haarcascade_frontalface_alt.xml");
        CascadeClassifier eyeDetector1 = new CascadeClassifier(
                "C:\\Users\\Administrator\\Desktop\\opencv\\haarcascade_eye.xml");
        CascadeClassifier eyeDetector2 = new CascadeClassifier(
                "C:\\Users\\Administrator\\Desktop\\opencv\\haarcascade_eye_tree_eyeglasses.xml");
        Mat image = Highgui.imread("C:\\Users\\Administrator\\Desktop\\opencv\\gakki.jpg");

        // Detect eyes in the image
        MatOfRect faceDetections = new MatOfRect();
        // eyeDetector2.detectMultiScale(image, faceDetections);
        Vector<Rect> objects;
        eyeDetector1.detectMultiScale(image, faceDetections, 2.0, 1, 1, new Size(20, 20), new Size(20, 20));
        Rect[] rects = faceDetections.toArray();
        Rect eyea, eyeb;
        eyea = rects[0];
        eyeb = rects[1];
        System.out.println("eye A at " + eyea.x + " and " + eyea.y);
        System.out.println("eye B at " + eyeb.x + " and " + eyeb.y);

        // Compute the distance and angle between the two eyes
        double dy = (eyeb.y - eyea.y);
        double dx = (eyeb.x - eyea.x);
        double len = Math.sqrt(dx * dx + dy * dy);
        System.out.println("dx is " + dx);
        System.out.println("dy is " + dy);
        System.out.println("len is " + len);
        double angle = Math.atan2(Math.abs(dy), Math.abs(dx)) * 180.0 / Math.PI;
        System.out.println("angle is " + angle);

        // Draw a rectangle around every detection
        for (Rect rect : faceDetections.toArray()) {
            Core.rectangle(image, new Point(rect.x, rect.y),
                    new Point(rect.x + rect.width, rect.y + rect.height),
                    new Scalar(0, 255, 0));
        }
        String filename = "C:\\Users\\Administrator\\Desktop\\opencv\\ouput.png";
        System.out.println(String.format("Writing %s", filename));
        Highgui.imwrite(filename, image);

        // watermark(a, d, "C:\\Users\\Administrator\\Desktop\\opencv\\zzlTm2.jpg", 0.7f);
        //
        // // Read the images without altering the original data
        // Mat image1 = Highgui.imread(a);
        // Mat image2 = Highgui.imread(d);
        // Mat mat1 = new Mat(); Mat mat2 = new Mat();
        // Size size = new Size(300, 300);
        // Imgproc.resize(image1, mat1, size);
        // Imgproc.resize(image2, mat2, size);
        // Mat mat3 = new Mat(size, CvType.CV_64F);
        // // Core.addWeighted(mat1, 0.5, mat2, 1, 0, mat3);
        // // Highgui.imwrite("C:\\Users\\Administrator\\Desktop\\opencv\\add.jpg", mat3);
        //
        // mergeSimple(ImageIO.read(new File(a)),
        //         ImageIO.read(new File(d)), 0, 0,
        //         new File("C:\\Users\\Administrator\\Desktop\\opencv\\add.jpg"));
    }
}
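One note on the eye-detection call in main(): the long overload used there is detectMultiScale(image, objects, scaleFactor, minNeighbors, flags, minSize, maxSize) in the 2.4.x Java binding. Passing 2.0 as the scale factor and the same Size for both minSize and maxSize makes the search very coarse. The sketch below is a drop-in helper with more conventional values; the values are illustrative, not taken from the original post, and it assumes the same imports as the Test class above.

    // Sketch: eye detection with more conventional detectMultiScale parameters (illustrative values)
    static Rect[] detectEyes(CascadeClassifier eyeDetector, Mat image) {
        MatOfRect eyes = new MatOfRect();
        eyeDetector.detectMultiScale(
                image,             // input image from Highgui.imread
                eyes,              // output rectangles
                1.1,               // scaleFactor: pyramid step between detection scales
                3,                 // minNeighbors: overlapping hits required to keep a detection
                0,                 // flags: ignored by newer cascade files
                new Size(20, 20),  // minSize: skip candidates smaller than this
                new Size());       // maxSize: an empty Size means no upper limit
        return eyes.toArray();
    }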
Final result: a green rectangle is drawn around the face, and the framed region can be cropped out to produce a standalone face image.
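The detectFace method above keeps the rectangle out of the crop by shifting its top-left corner by two pixels; another option is simply to crop before drawing, so the saved face never contains any of the green line. A minimal sketch, reusing the variable names from detectFace:

        // Crop first, annotate afterwards, so the saved face stays clean
        Rect rect = rects[0];
        Mat face = image.submat(rect).clone();   // clone() detaches the crop from the source Mat
        Highgui.imwrite(outFile, face);          // save the untouched face crop
        Core.rectangle(image,                    // now draw the rectangle on the full image for display
                new Point(rect.x, rect.y),
                new Point(rect.x + rect.width, rect.y + rect.height),
                new Scalar(0, 255, 0));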