Definition:
A support vector machine (SVM) is a binary classification method. Its basic idea: find a line (or hyperplane) that linearly separates the two classes. In practice, many lines (or hyperplanes) can separate the two classes; the one we want is the line (or hyperplane) that separates them with the maximum margin. We call such a line or hyperplane the optimal linear classifier, as shown in the figure below.
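Formally, this corresponds to the standard soft-margin objective (a sketch written to match the loss constructed in the code below, where $A$ is the weight vector, $b$ the bias, and $\alpha$ the regularization strength):

$$\min_{A,\,b}\ \frac{1}{n}\sum_{i=1}^{n}\max\bigl(0,\ 1 - y_i(x_i A - b)\bigr) \;+\; \alpha\,\lVert A\rVert_2^2$$

The first term (the hinge loss) penalizes points that are misclassified or fall inside the margin; the second term shrinks $\lVert A\rVert$, which widens the margin.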

The source code is as follows:
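(A note on versions: the script uses the TensorFlow 1.x API. If your environment runs TensorFlow 2.x — an assumption, not something this post covers — replacing the `import tensorflow as tf` line with the two lines below should let the rest run unchanged:)

import tensorflow.compat.v1 as tf  # expose the 1.x API under TF 2.x
tf.disable_v2_behavior()           # restore Session/placeholder semantics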
# Import libraries
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn import datasets
from tensorflow.python.framework import ops
# Create a graph session
ops.reset_default_graph()
sess = tf.Session()
# Load the data: use sepal length (x[0]) and petal width (x[3]) as features
iris = datasets.load_iris()
x_vals = np.array([[x[0], x[3]] for x in iris.data])
# Label I. setosa as 1 and the other two species as -1
y_vals = np.array([1 if y == 0 else -1 for y in iris.target])
# Split the data: 80% for training, the remaining 20% for testing
train_indices = np.random.choice(len(x_vals),
                                 round(len(x_vals) * 0.8),
                                 replace=False)
test_indices = np.array(list(set(range(len(x_vals))) - set(train_indices)))
x_vals_train = x_vals[train_indices]
x_vals_test = x_vals[test_indices]
y_vals_train = y_vals[train_indices]
y_vals_test = y_vals[test_indices]
# Declare the batch size
batch_size = 100
# Initialize placeholders
x_data = tf.placeholder(shape=[None, 2], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)
# Create variables
A = tf.Variable(tf.random_normal(shape=[2, 1]))
b = tf.Variable(tf.random_normal(shape=[1, 1]))
# Build the model: output = x*A - b
model_output = tf.subtract(tf.matmul(x_data, A), b)
# L2 regularization term
l2_norm = tf.reduce_sum(tf.square(A))
# Declare the alpha (regularization strength) parameter
alpha = tf.constant([0.01])
# Hinge loss: max(0, 1 - y * model_output)
term1 = tf.subtract(1., tf.multiply(model_output, y_target))
classification_term = tf.reduce_mean(tf.maximum(0., term1))
# Define the loss function: hinge loss plus the L2 penalty
loss = tf.add(classification_term, tf.multiply(alpha, l2_norm))
# Declare the prediction and accuracy functions
prediction = tf.sign(model_output)
accuracy = tf.reduce_mean(tf.cast(tf.equal(prediction, y_target), tf.float32))
# Declare the optimizer
my_opt = tf.train.GradientDescentOptimizer(0.01)
train_step = my_opt.minimize(loss)
# Initialize variables
init = tf.global_variables_initializer()
sess.run(init)
# Training loop
loss_vec = []
train_accuracy = []
test_accuracy = []
for i in range(1000):
    rand_index = np.random.choice(len(x_vals_train), size=batch_size)
    rand_x = x_vals_train[rand_index]
    rand_y = np.transpose([y_vals_train[rand_index]])
    sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
    temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y})
    loss_vec.append(temp_loss)
    train_acc_temp = sess.run(accuracy, feed_dict={
        x_data: x_vals_train,
        y_target: np.transpose([y_vals_train])})
    train_accuracy.append(train_acc_temp)
    test_acc_temp = sess.run(accuracy, feed_dict={
        x_data: x_vals_test,
        y_target: np.transpose([y_vals_test])})
    test_accuracy.append(test_acc_temp)
# Extract the coefficients and intercept
[[a1], [a2]] = sess.run(A)
[[b]] = sess.run(b)
# The decision boundary a1*x0 + a2*x1 - b = 0, solved for x0 (sepal length),
# gives x0 = (-a2/a1)*x1 + b/a1, i.e. the slope and intercept below
slope = -a2/a1
y_intercept = b/a1
x1_vals = [d[1] for d in x_vals]
# Evaluate the separating line over the petal-width values
best_fit = []
for i in x1_vals:
    best_fit.append(slope*i + y_intercept)
setosa_x = [d[1] for i, d in enumerate(x_vals) if y_vals[i] == 1]
setosa_y = [d[0] for i, d in enumerate(x_vals) if y_vals[i] == 1]
not_setosa_x = [d[1] for i, d in enumerate(x_vals) if y_vals[i] == -1]
not_setosa_y = [d[0] for i, d in enumerate(x_vals) if y_vals[i] == -1]
%matplotlib inline
# Plot the classification results
plt.plot(setosa_x, setosa_y, 'o', label='I. setosa')
plt.plot(not_setosa_x, not_setosa_y, 'x', label='Non-setosa')
plt.plot(x1_vals, best_fit, 'r-', label='Linear separator', linewidth=3)
plt.ylim([0, 10])
plt.legend(loc='lower right')
plt.title('Sepal Length vs Petal Width')
plt.xlabel('Petal Width')
plt.ylabel('Sepal Length')
plt.show()
# Plot the training and test accuracy
plt.plot(train_accuracy, 'k-', label='Training accuracy')
plt.plot(test_accuracy, 'r--', label='Test accuracy')
plt.title('Train and Test Set Accuracy')
plt.xlabel('Iteration')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.show()
# Plot the loss over time
plt.plot(loss_vec, 'k-')
plt.title('Loss per Iteration')
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.show()
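As a quick usage example (a minimal sketch, not from the original post: the sample values in `new_flower` are assumed, and the session above must still be open), the trained classifier can label a new (sepal length, petal width) measurement:

# Hypothetical new sample; feature order matches x_vals: [sepal length, petal width]
new_flower = np.array([[5.0, 0.3]], dtype=np.float32)
pred = sess.run(prediction, feed_dict={x_data: new_flower})
print('I. setosa' if pred[0][0] == 1 else 'Non-setosa')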
Classification results:

Accuracy:

Loss:
