numpy.core._exceptions.UFuncTypeError: ufunc 'subtract' did not contain a loop with signature matching types (dtype('<U1'), dtype('float64')) -> None


機器學習實戰的Logistic回歸梯度上升優化算法中遇到了這個問題

numpy.core._exceptions.UFuncTypeError: ufunc 'subtract' did not contain a loop with signature matching types (dtype('<U1'), dtype('float64')) -> None

代碼如下

import math
import numpy as np


def loadDataSet():
    """Read 'testSet.txt' and return (features, labels).

    Each feature row is [1.0, x1, x2] — a bias term prepended to the two
    coordinates. Labels are returned as the raw strings read from the file.
    """
    features, labels = [], []
    with open('testSet.txt') as fbj:
        for raw in fbj:
            fields = raw.strip().split()
            features.append([1.0, float(fields[0]), float(fields[1])])
            labels.append(fields[2])
    return features, labels


def sigmoid(inX):
    """Logistic sigmoid 1 / (1 + e^-x); works elementwise on numpy arrays."""
    return 1.0 / (1.0 + np.exp(-inX))


def gradAscent(dataSet, labelSet):
    # Batch gradient ascent for logistic regression ("Machine Learning in
    # Action", ch. 5). Intentionally left as-is: this is the buggy version
    # the article uses to reproduce the UFuncTypeError.
    dataMtrix = np.mat(dataSet)
    # NOTE(review): labelSet still holds the raw strings read from the file,
    # so labelMat gets dtype '<U1' (unicode), not float64 — the root cause
    # of the error below.
    labelMat = np.mat(labelSet).transpose()
    m, n = np.shape(dataMtrix)
    alpha = 0.001
    maxCycles = 500
    weights = np.ones((n, 1))
    for _ in range(maxCycles):
        h = sigmoid(dataMtrix * weights)
        # BUG demonstrated by the article: string matrix minus float matrix
        # raises "ufunc 'subtract' did not contain a loop with signature
        # matching types (dtype('<U1'), dtype('float64'))".
        error = labelMat - h
        weights = weights + alpha * dataMtrix.transpose() * error
    return weights


# Driver: load the data and run gradient ascent. With the unconverted string
# labels produced by loadDataSet, this call raises the UFuncTypeError the
# article discusses.
dataSet, labelSet = loadDataSet()
# print(dataSet)
# print(labelSet)
print(gradAscent(dataSet, labelSet))

這里報錯說的是數據類型不符不能相減
那么分別查看一下(在jupyter調試)

labelMat.dtype

dtype('<U1')
h.dtype

dtype('float64')

那么解決辦法就是將<U1類型換成float64
但是使用如下方法還是報錯

labelMat.dtype = 'float64'

ValueError: When changing to a larger dtype, its size must be a divisor of the total size in bytes of the last axis of the array.

那么只好乖乖使用astype方法

labelMat = labelMat.astype(np.float64)

修改后的代碼及結果如下

import math
import numpy as np


def loadDataSet():
    """Parse 'testSet.txt' into a feature list and a label list.

    Feature rows take the form [1.0, x1, x2] (leading 1.0 is the bias
    column). Labels are kept exactly as read, i.e. as strings.
    """
    dataSet = []
    labelSet = []
    with open('testSet.txt') as fbj:
        lines = fbj.readlines()
        for line in lines:
            parts = line.strip().split()
            x1, x2 = float(parts[0]), float(parts[1])
            dataSet.append([1.0, x1, x2])
            labelSet.append(parts[2])
    return dataSet, labelSet


def sigmoid(inX):
    """Return the logistic function of inX (scalar or numpy array)."""
    denom = 1 + np.exp(-inX)
    return 1 / denom


def gradAscent(dataSet, labelSet):
    """Fit logistic-regression weights by batch gradient ascent.

    Parameters
    ----------
    dataSet : list of [1.0, x1, x2] feature rows (bias column first).
    labelSet : list of class labels; may be strings ('0'/'1') as produced
        by loadDataSet — they are cast to float64 below.

    Returns
    -------
    np.matrix of shape (n, 1) holding the learned weight vector.
    """
    dataMatrix = np.mat(dataSet)
    # Labels arrive as strings (dtype '<U1'); cast to float64 so the
    # subtraction `labelMat - h` is numeric instead of raising
    # UFuncTypeError. This is the fix the article arrives at.
    labelMat = np.mat(labelSet).transpose().astype(np.float64)
    n = dataMatrix.shape[1]  # number of features (row count is not needed)
    alpha = 0.001            # learning rate
    maxCycles = 500          # fixed iteration budget
    weights = np.ones((n, 1))
    for _ in range(maxCycles):
        h = sigmoid(dataMatrix * weights)   # m x 1 predicted probabilities
        error = labelMat - h                # residual drives the update
        weights = weights + alpha * dataMatrix.transpose() * error
    return weights


# Driver: load the data and print the learned 3x1 weight vector. Requires
# 'testSet.txt' in the working directory.
dataSet, labelSet = loadDataSet()
# print(dataSet)
# print(labelSet)
print(gradAscent(dataSet, labelSet))


[[ 4.12414349]
 [ 0.48007329]
 [-0.6168482 ]]


免責聲明!

本站轉載的文章為個人學習借鑒使用,本站對版權不負任何法律責任。如果侵犯了您的隱私權益,請聯系本站郵箱yoyou2525@163.com刪除。



 
粵ICP備18138465號   © 2018-2025 CODEPRJ.COM