OCR stands for Optical Character Recognition, and in this exercise we recognize English letters. Given an OCR image, we extract the ROI containing each character and normalize its size; the pixel values of the whole normalized image can then be used directly as the feature vector. However, using the entire image as the feature makes the dimensionality very high and the computation heavy, so some dimensionality reduction is usually applied to shrink the input.
The typical procedure is: first crop the original image to obtain the character ROI and binarize it. Then divide the image into blocks and count the non-zero pixels in each block; this produces a much smaller matrix, and that matrix is the new feature vector. OpenCV ships some data of this kind in
\opencv\sources\samples\data\letter-recognition.data
Open this file and take a look:
Each row is one sample. The first column, an uppercase letter, is the label, and the following 16 columns are that letter's feature vector. The file contains 20000 samples in total, divided into 26 classes (the 26 letters).
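For reference, the block-counting feature extraction described above might look something like the minimal sketch below. This is a hypothetical helper of my own (the grayscale input, the 32x32 normalization and the 4x4 grid are arbitrary choices), and it is not necessarily how letter-recognition.data itself was generated; it only illustrates the idea.

    // Hypothetical helper illustrating the block-count feature idea: binarize a grayscale
    // character ROI, normalize it to 32x32, split it into a 4x4 grid and count the non-zero
    // pixels in each cell, giving a 16-dimensional feature vector.
    #include <opencv2/opencv.hpp>

    cv::Mat blockCountFeature(const cv::Mat& roi, int grid = 4)
    {
        CV_Assert(roi.type() == CV_8UC1);                        // assumes a grayscale ROI
        cv::Mat resized, bin;
        cv::resize(roi, resized, cv::Size(32, 32));              // size normalization
        cv::threshold(resized, bin, 0, 255, cv::THRESH_BINARY | cv::THRESH_OTSU); // binarization
        cv::Mat feature(1, grid * grid, CV_32F);
        int cell = 32 / grid;
        for (int by = 0; by < grid; by++)
            for (int bx = 0; bx < grid; bx++)
            {
                cv::Mat block = bin(cv::Rect(bx * cell, by * cell, cell, cell));
                // one feature per block: the number of non-zero (foreground) pixels
                feature.at<float>(by * grid + bx) = (float)cv::countNonZero(block);
            }
        return feature;
    }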
After reading in this data we split it into two parts: the first 16000 samples serve as the training set. Once a classifier is trained, we test it both on those 16000 training samples and on the remaining 4000 samples, which gives the training accuracy and the test accuracy. AdaBoost is a special case: it uses 10000 samples for training and 10000 for testing.
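As a side note (this is not how the full program below is written), the same read / split / test workflow can be expressed much more compactly with OpenCV 3.x's TrainData::loadFromCSV, setTrainTestSplit and StatModel::calcError. A minimal sketch for the KNN case, assuming the data file sits in the working directory and that loadFromCSV's automatic type detection treats the letter column as a categorical response:

    #include <cstdio>
    #include <opencv2/opencv.hpp>
    using namespace cv;
    using namespace cv::ml;

    int main()
    {
        // Column 0 (the letter) is taken as the response; columns 1..16 are the features.
        Ptr<TrainData> tdata = TrainData::loadFromCSV("letter-recognition.data", 0, 0);
        tdata->setTrainTestSplit(16000, false);   // first 16000 rows train, remaining 4000 test

        Ptr<KNearest> knn = KNearest::create();
        knn->setDefaultK(10);
        knn->setIsClassifier(true);
        knn->train(tdata);

        // calcError returns the percentage of misclassified samples
        printf("train error = %.1f%%\n", knn->calcError(tdata, false, noArray()));
        printf("test  error = %.1f%%\n", knn->calcError(tdata, true, noArray()));
        return 0;
    }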
The complete code is:

#include "stdafx.h" #include "opencv2\opencv.hpp" #include <iostream> using namespace std; using namespace cv; using namespace cv::ml; // 讀取文件數據 bool read_num_class_data(const string& filename, int var_count,Mat* _data, Mat* _responses) { const int M = 1024; char buf[M + 2]; Mat el_ptr(1, var_count, CV_32F); int i; vector<int> responses; _data->release(); _responses->release(); FILE *f; fopen_s(&f, filename.c_str(), "rt"); if (!f) { cout << "Could not read the database " << filename << endl; return false; } for (;;) { char* ptr; if (!fgets(buf, M, f) || !strchr(buf, ',')) break; responses.push_back((int)buf[0]); ptr = buf + 2; for (i = 0; i < var_count; i++) { int n = 0; sscanf_s(ptr, "%f%n", &el_ptr.at<float>(i), &n); ptr += n + 1; } if (i < var_count) break; _data->push_back(el_ptr); } fclose(f); Mat(responses).copyTo(*_responses); return true; } //准備訓練數據 Ptr<TrainData> prepare_train_data(const Mat& data, const Mat& responses, int ntrain_samples) { Mat sample_idx = Mat::zeros(1, data.rows, CV_8U); Mat train_samples = sample_idx.colRange(0, ntrain_samples); train_samples.setTo(Scalar::all(1)); int nvars = data.cols; Mat var_type(nvars + 1, 1, CV_8U); var_type.setTo(Scalar::all(VAR_ORDERED)); var_type.at<uchar>(nvars) = VAR_CATEGORICAL; return TrainData::create(data, ROW_SAMPLE, responses, noArray(), sample_idx, noArray(), var_type); } //設置迭代條件 inline TermCriteria TC(int iters, double eps) { return TermCriteria(TermCriteria::MAX_ITER + (eps > 0 ? TermCriteria::EPS : 0), iters, eps); } //分類預測 void test_and_save_classifier(const Ptr<StatModel>& model, const Mat& data, const Mat& responses, int ntrain_samples, int rdelta) { int i, nsamples_all = data.rows; double train_hr = 0, test_hr = 0; // compute prediction error on train and test data for (i = 0; i < nsamples_all; i++) { Mat sample = data.row(i); float r = model->predict(sample); r = std::abs(r + rdelta - responses.at<int>(i)) <= FLT_EPSILON ? 1.f : 0.f; if (i < ntrain_samples) train_hr += r; else test_hr += r; } test_hr /= nsamples_all - ntrain_samples; train_hr = ntrain_samples > 0 ? 
train_hr / ntrain_samples : 1.; printf("Recognition rate: train = %.1f%%, test = %.1f%%\n", train_hr*100., test_hr*100.); } //隨機樹分類 bool build_rtrees_classifier(const string& data_filename) { Mat data; Mat responses; read_num_class_data(data_filename, 16, &data, &responses); int nsamples_all = data.rows; int ntrain_samples = (int)(nsamples_all*0.8); Ptr<RTrees> model; Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples); model = RTrees::create(); model->setMaxDepth(10); model->setMinSampleCount(10); model->setRegressionAccuracy(0); model->setUseSurrogates(false); model->setMaxCategories(15); model->setPriors(Mat()); model->setCalculateVarImportance(true); model->setActiveVarCount(4); model->setTermCriteria(TC(100, 0.01f)); model->train(tdata); test_and_save_classifier(model, data, responses, ntrain_samples, 0); cout << "Number of trees: " << model->getRoots().size() << endl; // Print variable importance Mat var_importance = model->getVarImportance(); if (!var_importance.empty()) { double rt_imp_sum = sum(var_importance)[0]; printf("var#\timportance (in %%):\n"); int i, n = (int)var_importance.total(); for (i = 0; i < n; i++) printf("%-2d\t%-4.1f\n", i, 100.f*var_importance.at<float>(i) / rt_imp_sum); } return true; } //adaboost分類 bool build_boost_classifier(const string& data_filename) { const int class_count = 26; Mat data; Mat responses; Mat weak_responses; read_num_class_data(data_filename, 16, &data, &responses); int i, j, k; Ptr<Boost> model; int nsamples_all = data.rows; int ntrain_samples = (int)(nsamples_all*0.5); int var_count = data.cols; Mat new_data(ntrain_samples*class_count, var_count + 1, CV_32F); Mat new_responses(ntrain_samples*class_count, 1, CV_32S); for (i = 0; i < ntrain_samples; i++) { const float* data_row = data.ptr<float>(i); for (j = 0; j < class_count; j++) { float* new_data_row = (float*)new_data.ptr<float>(i*class_count + j); memcpy(new_data_row, data_row, var_count*sizeof(data_row[0])); new_data_row[var_count] = (float)j; new_responses.at<int>(i*class_count + j) = responses.at<int>(i) == j + 'A'; } } Mat var_type(1, var_count + 2, CV_8U); var_type.setTo(Scalar::all(VAR_ORDERED)); var_type.at<uchar>(var_count) = var_type.at<uchar>(var_count + 1) = VAR_CATEGORICAL; Ptr<TrainData> tdata = TrainData::create(new_data, ROW_SAMPLE, new_responses, noArray(), noArray(), noArray(), var_type); vector<double> priors(2); priors[0] = 1; priors[1] = 26; model = Boost::create(); model->setBoostType(Boost::GENTLE); model->setWeakCount(100); model->setWeightTrimRate(0.95); model->setMaxDepth(5); model->setUseSurrogates(false); model->setPriors(Mat(priors)); model->train(tdata); Mat temp_sample(1, var_count + 1, CV_32F); float* tptr = temp_sample.ptr<float>(); // compute prediction error on train and test data double train_hr = 0, test_hr = 0; for (i = 0; i < nsamples_all; i++) { int best_class = 0; double max_sum = -DBL_MAX; const float* ptr = data.ptr<float>(i); for (k = 0; k < var_count; k++) tptr[k] = ptr[k]; for (j = 0; j < class_count; j++) { tptr[var_count] = (float)j; float s = model->predict(temp_sample, noArray(), StatModel::RAW_OUTPUT); if (max_sum < s) { max_sum = s; best_class = j + 'A'; } } double r = std::abs(best_class - responses.at<int>(i)) < FLT_EPSILON ? 1 : 0; if (i < ntrain_samples) train_hr += r; else test_hr += r; } test_hr /= nsamples_all - ntrain_samples; train_hr = ntrain_samples > 0 ? 
train_hr / ntrain_samples : 1.; printf("Recognition rate: train = %.1f%%, test = %.1f%%\n", train_hr*100., test_hr*100.); cout << "Number of trees: " << model->getRoots().size() << endl; return true; } //多層感知機分類(ANN) bool build_mlp_classifier(const string& data_filename) { const int class_count = 26; Mat data; Mat responses; read_num_class_data(data_filename, 16, &data, &responses); Ptr<ANN_MLP> model; int nsamples_all = data.rows; int ntrain_samples = (int)(nsamples_all*0.8); Mat train_data = data.rowRange(0, ntrain_samples); Mat train_responses = Mat::zeros(ntrain_samples, class_count, CV_32F); // 1. unroll the responses cout << "Unrolling the responses...\n"; for (int i = 0; i < ntrain_samples; i++) { int cls_label = responses.at<int>(i) -'A'; train_responses.at<float>(i, cls_label) = 1.f; } // 2. train classifier int layer_sz[] = { data.cols, 100, 100, class_count }; int nlayers = (int)(sizeof(layer_sz) / sizeof(layer_sz[0])); Mat layer_sizes(1, nlayers, CV_32S, layer_sz); #if 1 int method = ANN_MLP::BACKPROP; double method_param = 0.001; int max_iter = 300; #else int method = ANN_MLP::RPROP; double method_param = 0.1; int max_iter = 1000; #endif Ptr<TrainData> tdata = TrainData::create(train_data, ROW_SAMPLE, train_responses); model = ANN_MLP::create(); model->setLayerSizes(layer_sizes); model->setActivationFunction(ANN_MLP::SIGMOID_SYM, 0, 0); model->setTermCriteria(TC(max_iter, 0)); model->setTrainMethod(method, method_param); model->train(tdata); return true; } //K最近鄰分類 bool build_knearest_classifier(const string& data_filename, int K) { Mat data; Mat responses; read_num_class_data(data_filename, 16, &data, &responses); int nsamples_all = data.rows; int ntrain_samples = (int)(nsamples_all*0.8); Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples); Ptr<KNearest> model = KNearest::create(); model->setDefaultK(K); model->setIsClassifier(true); model->train(tdata); test_and_save_classifier(model, data, responses, ntrain_samples, 0); return true; } //貝葉斯分類 bool build_nbayes_classifier(const string& data_filename) { Mat data; Mat responses; read_num_class_data(data_filename, 16, &data, &responses); int nsamples_all = data.rows; int ntrain_samples = (int)(nsamples_all*0.8); Ptr<NormalBayesClassifier> model; Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples); model = NormalBayesClassifier::create(); model->train(tdata); test_and_save_classifier(model, data, responses, ntrain_samples, 0); return true; } //svm分類 bool build_svm_classifier(const string& data_filename) { Mat data; Mat responses; read_num_class_data(data_filename, 16, &data, &responses); int nsamples_all = data.rows; int ntrain_samples = (int)(nsamples_all*0.8); Ptr<SVM> model; Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples); model = SVM::create(); model->setType(SVM::C_SVC); model->setKernel(SVM::LINEAR); model->setC(1); model->train(tdata); test_and_save_classifier(model, data, responses, ntrain_samples, 0); return true; } int main() { string data_filename = "E:/opencv/opencv/sources/samples/data/letter-recognition.data"; //字母數據 cout << "svm分類:" << endl; build_svm_classifier(data_filename); cout << "貝葉斯分類:" << endl; build_nbayes_classifier(data_filename); cout << "K最近鄰分類:" << endl; build_knearest_classifier(data_filename,10); cout << "隨機樹分類:" << endl; build_rtrees_classifier(data_filename); //cout << "adaboost分類:" << endl; //build_boost_classifier(data_filename); //cout << "ANN(多層感知機)分類:" << endl; //build_mlp_classifier(data_filename); }
Because the AdaBoost and ANN classifiers train very slowly, I commented them out in main; if you have the time and the interest, feel free to test them.
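One detail worth noting if you do enable the MLP path: build_mlp_classifier above only trains the network and never measures its accuracy. Below is a minimal sketch of how the trained model could be evaluated, using a hypothetical helper that takes the arg-max of the 26 output neurons as the predicted letter; it could be called right after model->train(tdata).

    // Hypothetical helper: evaluate a trained ANN_MLP. 'data' and 'responses' are the matrices
    // filled by read_num_class_data ('A'..'Z' codes in responses); the predicted letter is the
    // index of the largest of the 26 output-layer values, shifted back by 'A'.
    static void test_mlp_classifier(const Ptr<ANN_MLP>& model, const Mat& data,
                                    const Mat& responses, int ntrain_samples)
    {
        double train_hr = 0, test_hr = 0;
        for (int i = 0; i < data.rows; i++)
        {
            Mat output;
            model->predict(data.row(i), output);    // 1 x 26 row of output-neuron activations
            Point max_loc;
            minMaxLoc(output, 0, 0, 0, &max_loc);
            int predicted = max_loc.x + 'A';
            double r = (predicted == responses.at<int>(i)) ? 1 : 0;
            if (i < ntrain_samples) train_hr += r; else test_hr += r;
        }
        train_hr /= ntrain_samples;
        test_hr /= data.rows - ntrain_samples;
        printf("MLP recognition rate: train = %.1f%%, test = %.1f%%\n", train_hr*100., test_hr*100.);
    }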
The results:
Judging from the output, KNN (k-nearest neighbours) achieves the highest accuracy of the four classifiers tested. So for this OCR letter-recognition data, KNN is the best choice.