Python實現樸素貝葉斯演算法
1、演算法介紹
樸素貝葉斯假設特徵相互獨立,於是有下面的公式(x 表示特徵向量,c 表示類別):P(c|x) = P(c) · ∏ᵢ P(xᵢ|c) / P(x)。
原理: 通過數據集計算出公式中右邊分子中的各個概率。預測時根據公式計算出該樣本屬於不同類別的概率,選擇概率大的那個類別作為預測值。
2、演算法流程
模型中各個概率的計算,看下面代碼中的 trainNB() 函數。
3、Python代碼實現及注釋
import numpy as np
def loadDataSet():
    """Return a toy corpus of tokenized posts and their class labels.

    Returns:
        (postingList, classVec): postingList is a list of documents,
        each a list of word tokens; classVec holds one label per
        document (1 = abusive, 0 = benign).
    """
    posts = [
        ["my", "dog", "has", "flea", "problems", "help", "please"],
        ["maybe", "not", "take", "him", "to", "dog", "park", "stupid"],
        ["my", "dalmation", "is", "so", "cute", "I", "love", "him"],
        ["stop", "posting", "stupid", "worthless", "garbage"],
        ["mr", "licks", "ate", "my", "steak", "how", "to", "stop", "him"],
        ["quit", "buying", "worthless", "dog", "food", "stupid"],
    ]
    # Label 1 marks an abusive post, 0 a benign one.
    labels = [0, 1, 0, 1, 0, 1]
    return posts, labels
def createVocabList(dataSet):
    """Build the vocabulary: every distinct word appearing in dataSet.

    Args:
        dataSet: iterable of documents, each a list of word tokens.

    Returns:
        A list of the unique words. Order is unspecified, exactly as
        with the original set-based implementation.
    """
    vocab = set()
    for document in dataSet:
        # set.update adds all words in one call instead of rebinding
        # the set with `|` on every iteration (and `set()` replaces the
        # `set([])` anti-idiom).
        vocab.update(document)
    return list(vocab)
def setOfWords2Vec(vocabList, inputSet):
    """Convert a document into a binary set-of-words feature vector.

    Args:
        vocabList: the vocabulary, a list of words.
        inputSet: the document, a list of word tokens.

    Returns:
        A list of 0/1 flags, one per vocabulary word: 1 if the word
        occurs in the document, 0 otherwise. Unknown words are reported
        on stdout and otherwise ignored.
    """
    featureVec = [0] * len(vocabList)
    for word in inputSet:
        # EAFP: list.index raises ValueError for out-of-vocabulary words.
        try:
            featureVec[vocabList.index(word)] = 1
        except ValueError:
            print("the word: %s is not in my Vocabulary!" % word)
    return featureVec
def trainNB(trainMatrix, trainCategory):
    """Train a naive Bayes model from binary word-vector documents.

    Args:
        trainMatrix: sequence of equal-length binary word vectors,
            one per document.
        trainCategory: sequence of labels (1 = abusive; anything
            else is treated as class 0).

    Returns:
        (p0Vect, p1Vect, pAbusive): p0Vect/p1Vect are the per-word
        log-likelihoods log P(word | class) for class 0 / class 1;
        pAbusive is the prior P(class == 1).
    """
    # Convert once so the per-class sums are vectorized instead of
    # accumulated document-by-document in a Python loop.
    train = np.asarray(trainMatrix)
    labels = np.asarray(trainCategory)
    numTrainDocs, numWords = train.shape
    # Prior: fraction of abusive documents in the training set.
    pAbusive = labels.sum() / float(numTrainDocs)
    abusive = labels == 1
    # Laplace smoothing: start word counts at 1 and denominators at 2
    # so a word unseen in one class never yields a zero probability.
    p1Num = 1.0 + train[abusive].sum(axis=0)
    p0Num = 1.0 + train[~abusive].sum(axis=0)
    p1Denom = 2.0 + train[abusive].sum()
    p0Denom = 2.0 + train[~abusive].sum()
    # Log-probabilities avoid underflow when many small factors are
    # later summed in classifyNB.
    p1Vect = np.log(p1Num / p1Denom)
    p0Vect = np.log(p0Num / p0Denom)
    return p0Vect, p1Vect, pAbusive
def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    """Classify one feature vector with the trained naive Bayes model.

    Works in log-space: log P(c | x) is (up to a constant) the sum of
    the per-word log-likelihoods for the words present in the sample,
    plus the log of the class prior.

    Args:
        vec2Classify: binary word vector of the sample.
        p0Vec, p1Vec: per-word log-likelihoods for class 0 / class 1.
        pClass1: prior probability of class 1.

    Returns:
        1 if class 1 is the more probable label, otherwise 0.
    """
    logPosterior1 = sum(vec2Classify * p1Vec) + np.log(pClass1)
    logPosterior0 = sum(vec2Classify * p0Vec) + np.log(1.0 - pClass1)
    # Pick the class with the larger posterior log-probability.
    return 1 if logPosterior1 > logPosterior0 else 0
def getTrainMat(trainSet, myVocabList):
    """Convert every document in trainSet to a set-of-words vector.

    Args:
        trainSet: iterable of documents (lists of word tokens).
        myVocabList: the vocabulary list used for the encoding.

    Returns:
        A list with one binary feature vector per document, in order.
    """
    # Comprehension replaces the manual append loop (same output order).
    return [setOfWords2Vec(myVocabList, example) for example in trainSet]
if __name__ == "__main__":
    # Load the raw corpus and its labels.
    trainSet, labels = loadDataSet()
    # Build the vocabulary from every document.
    myVocabList = createVocabList(trainSet)
    # Encode each document as a binary set-of-words feature vector.
    trainMat = getTrainMat(trainSet, myVocabList)
    # Estimate the per-class log-likelihoods and the class prior.
    p0V, p1V, pAb = trainNB(trainMat, labels)
    # Debug output (restored: the commented print had been split across
    # raw lines, leaving uncommented string fragments — a SyntaxError):
    # print(p0V, "\n", p1V, "\n", pAb)
    # Classify a held-out example.
    test = ["cute", "has"]
    testVec = np.array(setOfWords2Vec(myVocabList, test))
    predict = classifyNB(testVec, p0V, p1V, pAb)
    print(test, "be predicted as ", predict)
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 11
- 12
- 13
- 14
- 15
- 16
- 17
- 18
- 19
- 20
- 21
- 22
- 23
- 24
- 25
- 26
- 27
- 28
- 29
- 30
- 31
- 32
- 33
- 34
- 35
- 36
- 37
- 38
- 39
- 40
- 41
- 42
- 43
- 44
- 45
- 46
- 47
- 48
- 49
- 50
- 51
- 52
- 53
- 54
- 55
- 56
- 57
- 58
- 59
- 60
- 61
- 62
- 63
- 64
- 65
- 66
- 67
- 68
- 69
- 70
- 71
- 72
- 73
- 74
- 75
- 76
- 77
- 78
- 79
- 80
- 81
- 82
- 83
- 84
- 85
- 86
- 87
- 88
- 89
- 90
- 91
- 92
- 93
- 94
- 95
- 96
- 97
- 98
- 99
- 100
- 101


※安卓okhttp3與伺服器通過json數據交互解析與上傳
※python腳本實現定時發送郵件
TAG:程序員小新人學習 |