How to Build an Image Classification Recognizer in Python


Applying machine learning to image recognition is a fascinating topic.

We can combine OpenCV's powerful features with machine learning algorithms to build an image recognition system.

First, we take a set of input images together with their class labels. Using vector quantization, the feature points are clustered and the cluster centers are computed; these centers become the elements of the visual codebook.
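
As a minimal sketch of this vector-quantization step (independent of the full program below), the SIFT descriptors from all training images are stacked and clustered with KMeans, and the cluster centers serve as the codewords:

import numpy as np
from sklearn.cluster import KMeans

def build_codebook(descriptor_list, num_clusters=32):
    # descriptor_list: one (N_i x 128) array of SIFT descriptors per training image
    all_descriptors = np.vstack(descriptor_list)          # stack into a single (N x 128) matrix
    kmeans = KMeans(n_clusters=num_clusters, n_init=10)   # vector quantization
    kmeans.fit(all_descriptors)
    return kmeans, kmeans.cluster_centers_                # the centers are the visual codewords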

Next, an image classifier assigns each image to one of the known classes. The ERF (extremely random forest) algorithm is a popular choice here because it is fast and reasonably accurate; it reaches its decisions through an ensemble of decision trees.
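
A minimal sketch of this training step, using scikit-learn's ExtraTreesClassifier on the bag-of-words histograms (the function and parameter names here are illustrative, not part of the full program below):

import numpy as np
from sklearn import preprocessing
from sklearn.ensemble import ExtraTreesClassifier

def train_erf(histograms, labels):
    # histograms: (n_samples x num_clusters) bag-of-words vectors; labels: class names
    le = preprocessing.LabelEncoder()
    y = le.fit_transform(labels)                  # encode class names as integers
    clf = ExtraTreesClassifier(n_estimators=100, max_depth=16, random_state=0)
    clf.fit(np.asarray(histograms), y)
    return clf, le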

Finally, with the trained ERF model we create an object recognizer that can identify the content of unseen images.
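
Once the training steps below have written codebook.pkl and erf.pkl, recognizing a new image is just a matter of loading them and calling predict. A minimal usage sketch with the ImageTagExtractor class defined in the full program below (the test image path is a placeholder):

import cv2

# erf.pkl and codebook.pkl are the files written by the training steps below
extractor = ImageTagExtractor('erf.pkl', 'codebook.pkl')
img = cv2.imread(r'f:\test\some_unseen_image.jpg')   # placeholder path to an unseen image
print('Predicted class:', extractor.predict(img, 200))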

Of course, this is only a prototype, and several problems remain:

The interface is not user-friendly.

How do we guarantee accuracy, and how should the hyperparameters be tuned? Only by studying the algorithm's mechanics carefully can we truly understand the internal implementation and then improve on it (one possible starting point is sketched below).
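
For the hyperparameter question, one possible (purely illustrative) starting point is a cross-validated grid search over the forest's size and depth; X and y stand for the bag-of-words histograms and encoded labels produced during training, and the grid values are placeholders:

from sklearn.ensemble import ExtraTreesClassifier
from sklearn.model_selection import GridSearchCV

def tune_erf(X, y):
    # X: bag-of-words histograms, y: integer class labels; grid values are illustrative only
    param_grid = {'n_estimators': [50, 100, 200], 'max_depth': [8, 16, 32]}
    search = GridSearchCV(ExtraTreesClassifier(random_state=0), param_grid, cv=5)
    search.fit(X, y)
    return search.best_estimator_, search.best_params_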

Now, on to the code:

import os

import sys
import argparse
import json
import cv2
import numpy as np
from sklearn.cluster import KMeans
# from star_detector import StarFeatureDetector
from sklearn.ensemble import ExtraTreesClassifier
from sklearn import preprocessing

try:
    import cPickle as pickle  # Python 2
except ImportError:
    import pickle  # Python 3

def load_training_data(input_folder):
    training_data = []
    if not os.path.isdir(input_folder):
        raise IOError("The folder " + input_folder + " doesn't exist")

    for root, dirs, files in os.walk(input_folder):
        for filename in (x for x in files if x.endswith('.jpg')):
            filepath = os.path.join(root, filename)
            print(filepath)
            # the class label is the name of the image's parent folder (Windows path separator)
            object_class = filepath.split('\\')[-2]
            print("object_class", object_class)
            training_data.append({'object_class': object_class, 'image_path': filepath})

    return training_data

class StarFeatureDetector(object):
    def __init__(self):
        # Star (CenSurE) keypoint detector from opencv-contrib's xfeatures2d module
        self.detector = cv2.xfeatures2d.StarDetector_create()

    def detect(self, img):
        return self.detector.detect(img)

class FeatureBuilder(object):
    def extract_features(self, img):
        # detect Star keypoints, then describe them with SIFT
        keypoints = StarFeatureDetector().detect(img)
        keypoints, feature_vectors = compute_sift_features(img, keypoints)
        return feature_vectors

    def get_codewords(self, input_map, scaling_size, max_samples=12):
        # collect SIFT descriptors from up to max_samples images per class,
        # then cluster them to obtain the visual codebook
        keypoints_all = []
        count = 0
        cur_class = ''
        for item in input_map:
            if count >= max_samples:
                if cur_class != item['object_class']:
                    count = 0
                else:
                    continue
            count += 1
            if count == max_samples:
                print("Built centroids for", item['object_class'])

            cur_class = item['object_class']
            img = cv2.imread(item['image_path'])
            img = resize_image(img, scaling_size)
            feature_vectors = self.extract_features(img)
            keypoints_all.extend(feature_vectors)

        kmeans, centroids = BagOfWords().cluster(keypoints_all)
        return kmeans, centroids

class BagOfWords(object):
    def __init__(self, num_clusters=32):
        self.num_dims = 128          # SIFT descriptor dimensionality
        self.num_clusters = num_clusters
        self.num_retries = 10

    def cluster(self, datapoints):
        # k-means clustering: the cluster centers become the codewords
        kmeans = KMeans(self.num_clusters,
                        n_init=max(self.num_retries, 1),
                        max_iter=10, tol=1.0)
        res = kmeans.fit(datapoints)
        centroids = res.cluster_centers_
        return kmeans, centroids

    def normalize(self, input_data):
        sum_input = np.sum(input_data)
        if sum_input > 0:
            return input_data / sum_input
        else:
            return input_data

    def construct_feature(self, img, kmeans, centroids):
        # build a normalized histogram of codeword occurrences for one image
        keypoints = StarFeatureDetector().detect(img)
        keypoints, feature_vectors = compute_sift_features(img, keypoints)
        labels = kmeans.predict(feature_vectors)
        feature_vector = np.zeros(self.num_clusters)

        for i, item in enumerate(feature_vectors):
            feature_vector[labels[i]] += 1

        feature_vector_img = np.reshape(feature_vector, (1, feature_vector.shape[0]))
        return self.normalize(feature_vector_img)

# Extract features from the input images and
# map them to the corresponding object classes
def get_feature_map(input_map, kmeans, centroids, scaling_size):
    feature_map = []
    for item in input_map:
        temp_dict = {}
        temp_dict['object_class'] = item['object_class']

        print("Extracting features for", item['image_path'])
        img = cv2.imread(item['image_path'])
        img = resize_image(img, scaling_size)

        temp_dict['feature_vector'] = BagOfWords().construct_feature(img, kmeans, centroids)
        if temp_dict['feature_vector'] is not None:
            feature_map.append(temp_dict)
    return feature_map

# Extract SIFT descriptors at the given keypoints
def compute_sift_features(img, keypoints):
    if img is None:
        raise TypeError('Invalid input image')

    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    keypoints, descriptors = cv2.xfeatures2d.SIFT_create().compute(img_gray, keypoints)
    return keypoints, descriptors

# Resize the shorter dimension to 'new_size'
# while maintaining the aspect ratio
def resize_image(input_img, new_size):
    h, w = input_img.shape[:2]
    scaling_factor = new_size / float(h)

    if w < h:
        scaling_factor = new_size / float(w)

    new_shape = (int(w * scaling_factor), int(h * scaling_factor))
    return cv2.resize(input_img, new_shape)

def build_features_main():
    data_folder = 'training_images\\'
    scaling_size = 200
    codebook_file = 'codebook.pkl'
    feature_map_file = 'feature_map.pkl'

    # Load the training data
    training_data = load_training_data(data_folder)

    # Build the visual codebook
    print("====== Building visual codebook ======")
    kmeans, centroids = FeatureBuilder().get_codewords(training_data, scaling_size)
    if codebook_file:
        with open(codebook_file, 'wb') as f:
            pickle.dump((kmeans, centroids), f)

    # Extract features from the input images
    print("\n====== Building the feature map ======")
    feature_map = get_feature_map(training_data, kmeans, centroids, scaling_size)
    if feature_map_file:
        with open(feature_map_file, 'wb') as f:
            pickle.dump(feature_map, f)

# --feature-map-file feature_map.pkl --model-file erf.pkl
#----------------------------------------------------------------------------------------------------------
class ERFTrainer(object):
    def __init__(self, X, label_words):
        # Extremely random forest classifier trained on the bag-of-words feature vectors
        self.le = preprocessing.LabelEncoder()
        self.clf = ExtraTreesClassifier(n_estimators=100,
                                        max_depth=16, random_state=0)

        y = self.encode_labels(label_words)
        self.clf.fit(np.asarray(X), y)

    def encode_labels(self, label_words):
        self.le.fit(label_words)
        return np.array(self.le.transform(label_words), dtype=np.float32)

    def classify(self, X):
        label_nums = self.clf.predict(np.asarray(X))
        label_words = self.le.inverse_transform([int(x) for x in label_nums])
        return label_words
#------------------------------------------------------------------------------------------

class ImageTagExtractor(object):
    def __init__(self, model_file, codebook_file):
        # Load the trained ERF model and the visual codebook from disk
        with open(model_file, 'rb') as f:
            self.erf = pickle.load(f)

        with open(codebook_file, 'rb') as f:
            self.kmeans, self.centroids = pickle.load(f)

    def predict(self, img, scaling_size):
        img = resize_image(img, scaling_size)
        feature_vector = BagOfWords().construct_feature(
            img, self.kmeans, self.centroids)
        image_tag = self.erf.classify(feature_vector)[0]
        return image_tag

def train_recognizer_main():
    feature_map_file = 'feature_map.pkl'
    model_file = 'erf.pkl'

    # Load the feature map
    with open(feature_map_file, 'rb') as f:
        feature_map = pickle.load(f)

    # Extract the feature vectors and the labels
    label_words = [x['object_class'] for x in feature_map]
    dim_size = feature_map[0]['feature_vector'].shape[1]
    X = [np.reshape(x['feature_vector'], (dim_size,)) for x in feature_map]

    # Train the extremely random forest classifier
    erf = ERFTrainer(X, label_words)
    if model_file:
        with open(model_file, 'wb') as f:
            pickle.dump(erf, f)

    # --------------------------------------------------------------------
    # args = build_arg_parser().parse_args()
    model_file = 'erf.pkl'
    codebook_file = 'codebook.pkl'
    rootdir = r"f:\airplanes"
    file_list = os.listdir(rootdir)
    for filename in file_list:
        path = os.path.join(rootdir, filename)
        if os.path.isfile(path):
            try:
                print(path)
                input_image = cv2.imread(path)
                scaling_size = 200
                print("\nOutput:", ImageTagExtractor(model_file, codebook_file)
                      .predict(input_image, scaling_size))
            except Exception:
                continue
    # -----------------------------------------------------------------------
build_features_main()
train_recognizer_main()
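
Note that load_training_data reads the class label from each image's parent folder (filepath.split('\\')[-2]), so training_images\ is assumed to contain one subfolder per class, for example (folder names here are placeholders):

training_images\
    airplanes\
        image_0001.jpg
        image_0002.jpg
    motorbikes\
        image_0001.jpg
        image_0002.jpg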

That is the whole of this walkthrough on building an image classification recognizer in Python; I hope it serves as a useful reference.