Source code and resource files
# Who this article is for

Readers who already understand Naive Bayes and are running Python 3.x.
### Strengths, weaknesses, and applicability of Naive Bayes

- Strengths: still effective with relatively little data; handles multi-class problems.
- Weaknesses: sensitive to how the input data is prepared.
- Works with: nominal data.
### The general Naive Bayes workflow

1. Collect data: any method works; this chapter uses RSS feeds.
2. Prepare data: numeric or Boolean values are required.
3. Analyze data: with many features, plotting them individually does little good; histograms work better.
4. Train the algorithm: compute the conditional probabilities of the independent features (see the sketch after this list).
5. Test the algorithm: measure the error rate.
6. Use the algorithm: document classification is the classic Naive Bayes application, but the classifier works in any classification setting, not just text.
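Step 4 boils down to Bayes' rule: for a document with word vector w, compare p(w|c_i)p(c_i) across the classes c_i; the shared denominator p(w) can be ignored. A minimal sketch with made-up counts (the numbers below are purely illustrative, not from the chapter's dataset):

```python
# Which class is likelier for a document containing the word 'stupid'?
# All counts here are invented for illustration.
p_c1 = 0.5                    # prior: half the training docs are abusive
p_c0 = 0.5                    # prior: half are not
p_w_given_c1 = 3.0 / 19.0     # pretend frequency of 'stupid' in abusive docs
p_w_given_c0 = 1.0 / 24.0     # pretend frequency of 'stupid' in normal docs

# p(w) is identical for both classes, so comparing numerators is enough
score_c1 = p_w_given_c1 * p_c1
score_c0 = p_w_given_c0 * p_c0
print('abusive' if score_c1 > score_c0 else 'not abusive')   # -> abusive
```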
### Converting word lists to vectors

Add the following code to bayes.py:
```python
def loadDataSet():
    """Return a toy dataset of tokenized posts plus their class labels
    (1 = abusive, 0 = not abusive)."""
    postingList = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
                   ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
                   ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
                   ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
                   ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
                   ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
    classVec = [0, 1, 0, 1, 0, 1]
    return postingList, classVec

def createVocabList(dataSet):
    """Build a deduplicated vocabulary list from all documents."""
    vocabSet = set()                          # start with an empty set
    for document in dataSet:
        vocabSet = vocabSet | set(document)   # union with each document's words
    return list(vocabSet)

def setOfWords2Vec(vocabList, inputSet):
    """Turn a document into a 0/1 vector: 1 if the vocabulary word appears."""
    returnVec = [0] * len(vocabList)          # one zeroed slot per vocabulary word
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] = 1
        else:
            print("the word: %s is not in my Vocabulary!" % word)
    return returnVec
```
#### Try it out:

```python
>>> import bayes
>>> listOPosts, listClasses = bayes.loadDataSet()
>>> myVocabList = bayes.createVocabList(listOPosts)
>>> myVocabList
```

Output: (omitted; set ordering makes the exact word order vary between runs)

```python
>>> bayes.setOfWords2Vec(myVocabList, listOPosts[0])
```

Output: a 0/1 vector with ones at the positions of the words from the first post.
### Computing probabilities from word vectors

#### The Naive Bayes classifier training function

Add the following code to bayes.py:
```python
from numpy import ones, log, array   # put these imports at the top of bayes.py

# @trainMatrix    matrix of document word vectors
# @trainCategory  vector of class labels, one per document
def trainNB0(trainMatrix, trainCategory):
    numTrainDocs = len(trainMatrix)              # number of documents
    numWords = len(trainMatrix[0])               # vocabulary size
    pAbusive = sum(trainCategory) / float(numTrainDocs)  # P(abusive document)
    # initialize all counts to 1 and the denominators to 2 (Laplace smoothing),
    # so a word unseen in one class never zeroes out the whole product
    p0Num = ones(numWords); p1Num = ones(numWords)
    p0Denom = 2.0; p1Denom = 2.0
    for i in range(numTrainDocs):                # accumulate word counts per class
        if trainCategory[i] == 1:
            p1Num += trainMatrix[i]
            p1Denom += sum(trainMatrix[i])
        else:
            p0Num += trainMatrix[i]
            p0Denom += sum(trainMatrix[i])
    # element-wise log(count / total count per class); taking logs guards
    # against floating-point underflow and preserves numerical accuracy
    p1Vect = log(p1Num / p1Denom)
    p0Vect = log(p0Num / p0Denom)
    return p0Vect, p1Vect, pAbusive
```
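To see why trainNB0 returns log probabilities rather than raw ones, multiply many small numbers and watch the product vanish. A quick standalone illustration (not from the book):

```python
import math

# 1000 word probabilities of 0.01 each: the raw product underflows to 0.0,
# while the equivalent sum of logs stays a perfectly usable negative number
probs = [0.01] * 1000
product = 1.0
for p in probs:
    product *= p
print(product)                            # 0.0 -- underflow
print(sum(math.log(p) for p in probs))    # roughly -4605.2
```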
#### Try it out:

```python
>>> listOPosts, listClasses = bayes.loadDataSet()
>>> trainMat = []
>>> for postinDoc in listOPosts:
...     trainMat.append(bayes.setOfWords2Vec(myVocabList, postinDoc))
...
>>> p0V, p1V, pAb = bayes.trainNB0(trainMat, listClasses)
>>> pAb
0.5
>>> p0V
```

Output: (omitted; a vector of log probabilities ordered to match myVocabList)

```python
>>> p1V
```

Output: (omitted, as above)
### The Naive Bayes classification function

Add the following code to bayes.py:

```python
# @vec2Classify  word vector of the document to classify
# @p0Vec         log-probability vector of the non-abusive class
# @p1Vec         log-probability vector of the abusive class
# @pClass1       prior probability that a document is abusive
def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    # element-wise multiply, then sum: log P(w|c) + log P(c) for each class
    p1 = sum(vec2Classify * p1Vec) + log(pClass1)
    p0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass1)
    if p1 > p0:
        return 1
    else:
        return 0

def testingNB():
    """Train on the toy dataset, then classify two example posts."""
    listOPosts, listClasses = loadDataSet()
    myVocabList = createVocabList(listOPosts)
    trainMat = []
    for postinDoc in listOPosts:
        trainMat.append(setOfWords2Vec(myVocabList, postinDoc))
    p0V, p1V, pAb = trainNB0(array(trainMat), array(listClasses))
    testEntry = ['love', 'my', 'dalmation']
    thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
    print(testEntry, 'classified as:', classifyNB(thisDoc, p0V, p1V, pAb))
    testEntry = ['stupid', 'garbage']
    thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
    print(testEntry, 'classified as:', classifyNB(thisDoc, p0V, p1V, pAb))
```
#### Try it out:

```python
>>> import importlib
>>> importlib.reload(bayes)
>>> bayes.testingNB()
['love', 'my', 'dalmation'] classified as: 0
['stupid', 'garbage'] classified as: 1
```
### Preparing data: the bag-of-words document model

This function differs from setOfWords2Vec in exactly one spot: '= 1' becomes '+= 1', so repeated words are counted rather than merely flagged.

```python
def bagOfWords2VecMN(vocabList, inputSet):
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] += 1
    return returnVec
```
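The difference only shows up when a word repeats. A quick comparison on a tiny made-up vocabulary (assuming both functions are already in bayes.py):

```python
>>> vocab = ['dog', 'stupid', 'my']
>>> bayes.setOfWords2Vec(vocab, ['stupid', 'stupid', 'dog'])
[1, 1, 0]
>>> bayes.bagOfWords2VecMN(vocab, ['stupid', 'stupid', 'dog'])
[1, 2, 0]
```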
### Preparing data: tokenizing text

```python
>>> mySent = 'This book is the best book on Python or M.L, I have ever laid eyes upon.'
>>> mySent.split()
['This', 'book', 'is', 'the', 'best', 'book', 'on', 'Python', 'or', 'M.L,', 'I', 'have', 'ever', 'laid', 'eyes', 'upon.']
```

Punctuation sticks to the words, so we split on non-word characters with a regular expression instead. (The book uses the pattern '\W*', but a pattern that can match the empty string misbehaves under Python 3's re.split, so use '\W+'.)

```python
>>> import re
>>> regEx = re.compile('\\W+')
>>> listOfTokens = regEx.split(mySent)
>>> listOfTokens
['This', 'book', 'is', 'the', 'best', 'book', 'on', 'Python', 'or', 'M', 'L', 'I', 'have', 'ever', 'laid', 'eyes', 'upon', '']
>>> # drop the empty strings
>>> [tok for tok in listOfTokens if len(tok) > 0]
['This', 'book', 'is', 'the', 'best', 'book', 'on', 'Python', 'or', 'M', 'L', 'I', 'have', 'ever', 'laid', 'eyes', 'upon']
```
### Testing the algorithm: cross-validation with Naive Bayes

Add the following code to bayes.py:

```python
import re   # another import for the top of bayes.py

def textParse(bigString):
    """Tokenize a long string: split on non-word characters, lowercase,
    and keep only tokens longer than two characters."""
    listOfTokens = re.split(r'\W+', bigString)
    return [tok.lower() for tok in listOfTokens if len(tok) > 2]
```
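A quick sanity check of textParse on the sentence from the previous section; short tokens such as 'is' and 'M' fall below the three-character cutoff:

```python
>>> bayes.textParse('This book is the best book on Python or M.L, I have ever laid eyes upon.')
['this', 'book', 'the', 'best', 'book', 'python', 'have', 'ever', 'laid', 'eyes', 'upon']
```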
The test function itself:

```python
import random   # also belongs at the top of bayes.py

def spamTest():
    docList = []; classList = []; fullText = []
    for i in range(1, 26):
        # load and parse the spam and ham text files
        wordList = textParse(open('email/spam/%d.txt' % i, encoding="ISO-8859-1").read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)
        wordList = textParse(open('email/ham/%d.txt' % i, encoding="ISO-8859-1").read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    vocabList = createVocabList(docList)
    trainingSet = list(range(50)); testSet = []
    # randomly hold out 10 documents as the test set
    for i in range(10):
        randIndex = int(random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del(trainingSet[randIndex])
    trainMat = []; trainClasses = []
    for docIndex in trainingSet:
        trainMat.append(setOfWords2Vec(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V, p1V, pSpam = trainNB0(array(trainMat), array(trainClasses))
    errorCount = 0
    # classify each held-out document and count the mistakes
    for docIndex in testSet:
        wordVector = setOfWords2Vec(vocabList, docList[docIndex])
        if classifyNB(array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
    print('the error rate is:', float(errorCount) / len(testSet))
```
#### Try it out:

```python
>>> for i in range(10):
...     bayes.spamTest()
```

Output: (ten error rates; they vary from run to run because the test set is random)
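Because the held-out test set is random, one run says little. A small variant of my own (not from the book) averages several runs; it assumes spamTest is changed to end with `return float(errorCount) / len(testSet)` instead of only printing:

```python
# Hypothetical helper: relies on the modified spamTest described above.
def averageErrorRate(runs=10):
    total = 0.0
    for _ in range(runs):
        total += bayes.spamTest()
    print('average error rate over %d runs: %f' % (runs, total / runs))
```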
### Using Naive Bayes to find region-specific wording in personal ads

#### Importing the RSS feeds

```python
>>> import feedparser
>>> ny = feedparser.parse('http://newyork.craigslist.org/stp/index.rss')
>>> sf = feedparser.parse('http://sfbay.craigslist.org/stp/index.rss')
>>> ny['entries']
[]
>>> len(ny['entries'])
0
```

Clearly this site no longer serves the data, so we swap in different RSS feeds:
```python
>>> import feedparser
>>> ny = feedparser.parse('http://www.cppblog.com/kevinlynx/category/6337.html/rss')
>>> sf = feedparser.parse('http://blog.163.com/cbn.weekly/rss/')
>>> ny
```

Output: (omitted; a long parsed-feed dictionary)
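Live feeds come and go, so before training it's worth confirming that both feeds actually returned entries; localWords below holds out 20 documents for testing, so each feed needs comfortably more than 10 entries. A small defensive check (my own addition):

```python
# Guard against dead feeds before calling localWords
if min(len(ny['entries']), len(sf['entries'])) <= 10:
    print('a feed returned too few entries; pick different RSS sources')
```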
### The RSS feed classifier and frequent-word removal function

```python
import operator   # used for sorting by frequency

def calcMostFreq(vocabList, fullText):
    """Return the 30 most frequent vocabulary words in fullText."""
    freqDict = {}
    for token in vocabList:
        freqDict[token] = fullText.count(token)
    sortedFreq = sorted(freqDict.items(), key=operator.itemgetter(1), reverse=True)
    return sortedFreq[:30]
```
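calcMostFreq rescans fullText once per vocabulary word, which is O(|vocab| x |text|). If that ever becomes slow, collections.Counter from the standard library counts everything in one pass; a drop-in alternative sketch (same signature and return shape):

```python
from collections import Counter

def calcMostFreq(vocabList, fullText):
    """One-pass variant: count all tokens, keep only vocabulary words."""
    counts = Counter(fullText)
    freq = [(word, counts[word]) for word in vocabList]
    freq.sort(key=lambda pair: pair[1], reverse=True)
    return freq[:30]
```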
The feed classifier itself:

```python
def localWords(feed1, feed0):
    docList = []; classList = []; fullText = []
    minLen = min(len(feed1['entries']), len(feed0['entries']))
    for i in range(minLen):
        # take one entry from each feed per pass
        wordList = textParse(feed1['entries'][i]['summary'])
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)
        wordList = textParse(feed0['entries'][i]['summary'])
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    # remove the 30 most frequent words from the vocabulary
    vocabList = createVocabList(docList)
    top30Words = calcMostFreq(vocabList, fullText)
    for pairW in top30Words:
        if pairW[0] in vocabList:
            vocabList.remove(pairW[0])
    trainingSet = list(range(2 * minLen)); testSet = []
    # randomly hold out 20 documents as the test set
    for i in range(20):
        randIndex = int(random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del(trainingSet[randIndex])
    trainMat = []; trainClasses = []
    for docIndex in trainingSet:
        trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V, p1V, pSpam = trainNB0(array(trainMat), array(trainClasses))
    errorCount = 0
    for docIndex in testSet:
        wordVector = bagOfWords2VecMN(vocabList, docList[docIndex])
        if classifyNB(array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
    print('the error rate is: ', float(errorCount) / len(testSet))
    return vocabList, p0V, p1V
```
#### Try it out:

```python
>>> for i in range(2):
...     vocabList, pSF, pNY = bayes.localWords(ny, sf)
```

Output: (two error rates, again varying with the random test set)
### Analyzing the data: showing region-specific words

Add the following code to bayes.py:
```python
def getTopWords(ny, sf):
    vocabList, p0V, p1V = localWords(ny, sf)
    topNY = []; topSF = []
    # keep every word whose log probability clears the -6.0 threshold
    for i in range(len(p0V)):
        if p0V[i] > -6.0: topSF.append((vocabList[i], p0V[i]))
        if p1V[i] > -6.0: topNY.append((vocabList[i], p1V[i]))
    sortedSF = sorted(topSF, key=lambda pair: pair[1], reverse=True)
    print("SF**SF**SF**SF**SF**SF**SF**SF**SF")
    for item in sortedSF:
        print(item[0])
    sortedNY = sorted(topNY, key=lambda pair: pair[1], reverse=True)
    print("NY**NY**NY**NY**NY**NY**NY**NY**NY")
    for item in sortedNY:
        print(item[0])
```
#### Try it out:

```python
>>> import importlib
>>> importlib.reload(bayes)
>>> bayes.getTopWords(ny, sf)
```

Output: (omitted; two ranked word lists, one per feed)
### Summary

By assuming conditional independence between features, Naive Bayes needs far less data than estimating the full joint distribution would. The independence assumption is rarely strictly true, yet the classifier remains effective in practice.