1. Obtain a CIImage object from the initial UIImage.
2. Create a CIContext that will be used to analyze the image.
3. Create a CIDetector instance with the type and options parameters.
The type parameter specifies the kind of feature to detect. The options parameter sets the detection accuracy: low accuracy is faster, while high accuracy is more precise (a short sketch of the options dictionary follows this list).
4. Ask the detector for an array of the features (face instances) it finds in the image.
5. Use imageByCroppingToRect: to create a new CIImage from the original image, cropped to the bounds of the last feature found. These bounds are the CGRect in which the face was located.
6. Create a UIImage from that CIImage and display it in the image view.
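As mentioned in step 3, the options dictionary is where the speed/accuracy tradeoff is chosen. A minimal sketch (not part of the original code) of a detector configured for speed rather than precision, passing nil for the context so Core Image uses a default one:

CIDetector *fastDetector = [CIDetector detectorOfType:CIDetectorTypeFace
                                              context:nil
                                              options:@{CIDetectorAccuracy : CIDetectorAccuracyLow}];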
// self.mainImageView.image is the image the user selected
- (IBAction)findFace:(id)sender {
    UIImage *image = self.mainImageView.image;
    CIImage *coreImage = [[CIImage alloc] initWithImage:image];
    CIContext *context = [CIContext contextWithOptions:nil];
    CIDetector *detector = [CIDetector detectorOfType:CIDetectorTypeFace
                                              context:context
                                              options:@{CIDetectorAccuracy : CIDetectorAccuracyHigh}];
    NSArray *features = [detector featuresInImage:coreImage];
    if ([features count] > 0) {
        // Crop the original CIImage to the bounds of the last face found.
        CIImage *faceImage = [coreImage imageByCroppingToRect:[[features lastObject] bounds]];
        UIImage *face = [UIImage imageWithCGImage:[context createCGImage:faceImage fromRect:faceImage.extent]];
        self.faceImageView.image = face;
        [self.findFaceButton setTitle:[NSString stringWithFormat:@"%lu Face(s) Found", (unsigned long)[features count]]
                             forState:UIControlStateNormal];
        self.findFaceButton.enabled = NO;
        self.findFaceButton.alpha = 0.6;
    } else {
        [self.findFaceButton setTitle:@"No Faces Found" forState:UIControlStateNormal];
        self.findFaceButton.enabled = NO;
        self.findFaceButton.alpha = 0.6;
    }
}
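The action above crops only the last face in the features array, even though the button title reports how many faces were found. Below is a minimal sketch of a variant (the allFacesInImage: name is hypothetical, not part of the original tutorial) that crops every detected face and returns the results as UIImages; it assumes the same Core Image setup as the action above and that the file already imports UIKit and CoreImage.

// Hypothetical helper: crop every detected face, not just the last one.
- (NSArray<UIImage *> *)allFacesInImage:(UIImage *)image {
    CIImage *coreImage = [[CIImage alloc] initWithImage:image];
    CIContext *context = [CIContext contextWithOptions:nil];
    CIDetector *detector = [CIDetector detectorOfType:CIDetectorTypeFace
                                              context:context
                                              options:@{CIDetectorAccuracy : CIDetectorAccuracyHigh}];

    NSMutableArray<UIImage *> *faces = [NSMutableArray array];
    // Each feature's bounds are expressed in Core Image coordinates
    // (origin at the bottom-left of the image, not the top-left as in UIKit).
    for (CIFaceFeature *feature in [detector featuresInImage:coreImage]) {
        CIImage *cropped = [coreImage imageByCroppingToRect:feature.bounds];
        CGImageRef cgFace = [context createCGImage:cropped fromRect:cropped.extent];
        if (cgFace) {
            [faces addObject:[UIImage imageWithCGImage:cgFace]];
            CGImageRelease(cgFace);
        }
    }
    return faces;
}

The first element could then be assigned to self.faceImageView.image, or the whole array displayed in a collection view.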