NLP学习笔记:搭建一个分词工具(枚举算法和维特比算法)

1.使用枚举算法实现分词

根据已有的词典进行分词,将所有分词的可能性列举出来,并对每条分词路径按各个词的出现概率求对数之和;对数概率之和最大(等价于 -log 之和最小)的路径即为最好的分词结果,并将结果返回。

算法实现如下:

# TODO: 第一步

import xlrd
import numpy as np

# 从 Excel 词库中读取所有中文词。
# 用 set 存储词典:分词时要做大量 "word in dic_words" 成员查询,
# set 的查询是 O(1),而 list 是 O(n),在递归枚举里差距巨大。
workbook = xlrd.open_workbook("data/综合类中文词库.xlsx")

dic_words = set()    # 词典库中的全部单词(set,O(1) 成员判断)
max_len_word = 0     # 词典中最长单词的长度,用于限制切分窗口

booksheet = workbook.sheet_by_index(0)
for row in booksheet.get_rows():
    word = row[0].value
    dic_words.add(word)
    if len(word) > max_len_word:
        max_len_word = len(word)

print(len(dic_words))   # 注意:set 去重后数量可能少于原表行数
print(max_len_word)
# 以下是每一个单词出现的概率。为了问题的简化,我们只列出了一小部分单词的概率。 在这里没有出现、但出现在词典里的单词,统一把概率设置成 0.00001
# 比如 p("学院")=p("概率")=...0.00001

def word_segementation(input_str):
    """递归枚举 input_str 的所有可行分词方案。

    每个方案是一个单词列表,其中每个单词都存在于词典 dic_words 中,
    且长度不超过 max_len_word;无法完整切分时返回空列表。
    """
    if not input_str:
        return []

    results = []
    longest = min(len(input_str), max_len_word)
    for cut in range(1, longest + 1):
        head, tail = input_str[:cut], input_str[cut:]
        if head not in dic_words:
            continue
        if not tail:
            # 整个字符串恰好被切完,head 自成一种方案
            results.append([head])
        else:
            # 对剩余部分递归,把 head 接到每个子方案前面;
            # 若 tail 无法切分,递归返回 [],此处自然跳过
            for rest in word_segementation(tail):
                results.append([head] + rest)
    return results

# 每个单词的 unigram 概率。原网页转载时丢失了全部单字 key(都变成了空串 "",
# 在 dict 字面量里相互覆盖只剩一个),这里按原始课程数据恢复单字条目;
# 词典里出现但本表没有的单词,概率统一按 0.00001 处理。
word_prob = {"北京":0.03,"的":0.08,"天":0.005,"气":0.005,"天气":0.06,"真":0.04,"好":0.05,"真好":0.04,"啊":0.01,"真好啊":0.02,
             "今":0.01,"今天":0.07,"课程":0.06,"内容":0.06,"有":0.05,"很":0.03,"很有":0.04,"意思":0.06,"有意思":0.005,"课":0.01,
             "程":0.005,"经常":0.08,"意见":0.08,"意":0.01,"见":0.005,"有意见":0.02,"分歧":0.04,"分":0.02, "歧":0.005}


def word_segment_naive(input_str):
    """
    1. 对输入字符串做分词,枚举所有可行的分词结果。
    2. 对每一个分词结果,按 unigram 概率计算句子的对数概率。
    3. 返回对数概率最大(即 -log 之和最小)的分词结果。

    input_str: 输入字符串   输入格式:“今天天气好”
    返回: (best_segment, score)
        best_segment: 最好的分词结果  输出格式:["今天","天气","好"]
        score: 该分词的负对数概率之和(越小越好)
    """
    # 第一步: 计算所有可能的分词结果,要保证每个分完的词存在于词典里,结果可能非常多。
    segments = word_segementation(input_str)

    best_segment = []
    best_log_prob = -np.inf      # 当前最优分词的对数概率
    for seg in segments:
        # 句子对数概率 = 各词对数概率之和;词表外的词按 0.00001 计
        log_prob = 0.0
        for w in seg:
            log_prob += np.log(word_prob.get(w, 0.00001))
        # BUG FIX: 原实现比较 sum*-1 > best_score 却保存 sum(负数),
        # 导致第一次迭代之后条件几乎恒真、总是返回最后一个方案;
        # 这里改为直接取对数概率最大的方案。
        if log_prob > best_log_prob:
            best_log_prob = log_prob
            best_segment = seg

    return best_segment, -best_log_prob

# 三个测试句子:打印 (最优分词结果, 得分)
print (word_segment_naive("北京的天气真好啊"))
print (word_segment_naive("今天的课程内容很有意思"))
print (word_segment_naive("经常有意见分歧"))

2.使用维特比算法实现

算法思想:首先创建一个矩阵,然后根据每个单词的出现概率将矩阵内容填充。这样就可以根据句子构成一个有向图,然后在这个有向图中找出最长路径

找最长路径使用动态规划的思想:设置一个 dp 数组,dp[i] 表示从 0 到第 i 个字符的最长路径的值

dp[i]=max(dp[i-j]+graph[j][i] ,dp[i])

同时设计 last_word 数组,last_word[i] 记录最优路径上以第 i 个字符结尾的那个词的起始下标,即该词为 input[last_word[i]:i];最后从后往前遍历 last_word 数组,根据下标依次切出单词,直到下标为 0。具体实现如下

import xlrd
import numpy as np

# 从 Excel 词库中读取所有中文词。
# hint 解答:词典用 set 存储 —— 构图时要对字符串的每个子串做
# "word in dic_words" 查询,set 是 O(1),list 是 O(n)。
workbook = xlrd.open_workbook("data/综合类中文词库.xlsx")

dic_words = set()    # 词典库中的全部单词(set,O(1) 成员判断)
max_len_word = 0     # 词典中最长单词的长度

booksheet = workbook.sheet_by_index(0)
for row in booksheet.get_rows():
    word = row[0].value
    dic_words.add(word)
    if len(word) > max_len_word:
        max_len_word = len(word)

print(len(dic_words))   # 注意:set 去重后数量可能少于原表行数
print(max_len_word)

# 每个单词的 unigram 概率。原网页转载时丢失了全部单字 key(都变成了空串 "",
# 在 dict 字面量里相互覆盖只剩一个),这里按原始课程数据恢复单字条目;
# 词典里出现但本表没有的单词,概率统一按 0.00001 处理。
word_prob = {"北京":0.03,"的":0.08,"天":0.005,"气":0.005,"天气":0.06,"真":0.04,"好":0.05,"真好":0.04,"啊":0.01,"真好啊":0.02,
             "今":0.01,"今天":0.07,"课程":0.06,"内容":0.06,"有":0.05,"很":0.03,"很有":0.04,"意思":0.06,"有意思":0.005,"课":0.01,
             "程":0.005,"经常":0.08,"意见":0.08,"意":0.01,"见":0.005,"有意见":0.02,"分歧":0.04,"分":0.02, "歧":0.005}


## TODO 请编写word_segment_viterbi函数来实现对输入字符串的分词

def word_segment_viterbi(input_str):
    """
    1. 基于输入字符串、词典以及给定的 unigram 概率创建 DAG(有向图):
       节点 0..n 是字符间隙,边 (i, j) 表示 input_str[i:j] 是词典词,
       边权为该词概率的对数;非词典串没有边(权 -inf)。
    2. 用维特比(动态规划)寻找 0 -> n 的最大对数概率路径:
       dp[i] = 切分 input_str[:i] 的最大对数概率,last_word[i] 记录最优前驱。
    3. 沿 last_word 回溯得到分词结果。

    input_str: 输入字符串   输入格式:“今天天气好”
    best_segment: 最好的分词结果  输出格式:["今天","天气","好"]
    """
    n = len(input_str)

    # BUG FIX: 原实现把原始概率直接相加,并把非词典片段当作权 0 的边,
    # 这 (a) 错误地用加法代替概率乘法,(b) 允许路径穿过非词典片段。
    # 改用对数概率 + -inf 同时修复两点,并与枚举版的打分方式保持一致。
    graph = np.full((n + 1, n + 1), -np.inf)
    for i in range(n):
        for j in range(i + 1, n + 1):
            word = input_str[i:j]
            if word in dic_words:
                # 词典里有但概率表里没有的词,概率按 0.00001 计
                graph[i][j] = np.log(word_prob.get(word, 0.00001))

    dp = np.full(n + 1, -np.inf)             # dp[i]: 切分前 i 个字符的最大对数概率
    dp[0] = 0.0                              # 空前缀概率为 1,log(1) = 0
    last_word = np.zeros(n + 1, dtype=int)   # last_word[i]: 最后一个词的起始下标
    for i in range(1, n + 1):
        for j in range(0, i):
            if dp[j] + graph[j][i] > dp[i]:
                dp[i] = dp[j] + graph[j][i]
                last_word[i] = j

    # 从终点回溯:每一步切出词 input_str[last_word[q]:q],直到回到起点 0。
    # 若整句无法切分(dp[n] 为 -inf),last_word 全 0,退化为返回整句。
    best_segment = []
    q = n
    while q > 0:
        start = int(last_word[q])
        best_segment.insert(0, input_str[start:q])
        q = start
    return best_segment
# word_segment_viterbi("北京的天气真好啊")
# 三个测试句子:打印维特比算法的分词结果
print(word_segment_viterbi("北京的天气真好啊"))
print(word_segment_viterbi("今天的课程内容很有意思"))
print(word_segment_viterbi("经常有意见分歧")   )

猜你喜欢

转载自www.cnblogs.com/wys-373/p/13393553.html