机器学习(十五)隐马尔科夫模型-未完待续

一、HMM相关概念

二、前向传导

#coding=utf-8
import numpy as np
#前向传导算法

def Forward(A, B, Pi, O):
    """HMM forward algorithm: compute P(O | A, B, Pi).

    Parameters
    ----------
    A  : (m, m) ndarray, state-transition matrix; A[i, j] = P(state j at t+1 | state i at t)
    B  : (m, k) ndarray, emission matrix; B[i, o] = P(observation o | state i)
    Pi : (m,)   ndarray, initial state distribution
    O  : (n,)   ndarray of int, observation index sequence

    Returns
    -------
    float : the probability of the observation sequence. The value is also
            printed, preserving the original script's console output.
    """
    n = O.shape[0]
    m = A.shape[0]
    # alpha[t, j] holds the joint probability of observations O[0..t]
    # and being in state j at time t. float64 avoids needless precision loss.
    alpha = np.zeros((n, m), dtype=np.float64)
    alpha[0, :] = Pi * B[:, O[0]]  # initialize layer t=0
    for t in range(1, n):
        for i in range(m):
            # Sum transitions from every state at t-1 into state i,
            # then weight by the emission probability of O[t] from state i.
            alpha[t, i] = np.sum(alpha[t - 1, :] * A[:, i]) * B[i, O[t]]
    # Index n-1 explicitly: the original used the loop variable after the
    # loop, which raised NameError for a length-1 observation sequence.
    prob = float(np.sum(alpha[n - 1, :]))
    print(prob)
    return prob

# Demo: the classic three-state / two-symbol example (Li Hang's textbook).
transition = np.asarray([[0.5, 0.2, 0.3],
                         [0.3, 0.5, 0.2],
                         [0.2, 0.3, 0.5]])
emission = np.asarray([[0.5, 0.5],
                       [0.4, 0.6],
                       [0.7, 0.3]])
initial = np.asarray([0.2, 0.4, 0.4])
observed = np.asarray([0, 1, 0])
Forward(transition, emission, initial, observed)


三、维特比算法实践

#coding=utf-8
import numpy as np
#A为状态转移矩阵,B为观测矩阵,Pi为初始状态概率,o为观测序列
#函数的目标就是返回观测序列O所对应的最优状态序列
def Viterbi(A, B, Pi, O):
    """Viterbi decoding: most probable hidden-state sequence for observations O.

    Parameters
    ----------
    A  : (m, m) ndarray, state-transition matrix
    B  : (m, k) ndarray, emission (observation) matrix
    Pi : (m,)   ndarray, initial state distribution
    O  : (n,)   ndarray of int, observation index sequence

    Returns
    -------
    (n,) ndarray of int : the best state path, 1-based to match the original
    printed output. The log-probability of the best path and the path itself
    are also printed, preserving the original script's console output.
    """
    n = O.shape[0]
    m = A.shape[0]

    # delta[t, j]: max probability over all paths ending in state j at time t.
    delta = np.zeros((n, m), dtype=np.float64)
    # psi[t, j]: argmax predecessor of state j at time t (for backtracking).
    # np.int64 instead of np.int: the np.int alias was removed in NumPy 1.24.
    psi = np.zeros((n, m), dtype=np.int64)
    delta[0, :] = Pi * B[:, O[0]]  # initialize layer t=0

    for t in range(1, n):
        for i in range(m):
            # Score of reaching state i at time t from each state at t-1.
            scores = delta[t - 1, :] * A[:, i]
            delta[t, i] = np.max(scores) * B[i, O[t]]
            psi[t, i] = np.argmax(scores)  # best predecessor of state i

    best_path = np.full(n, -1, dtype=np.int64)
    best_path[n - 1] = np.argmax(delta[n - 1, :])
    print(np.log(np.max(delta[n - 1, :])))
    # Backtrack from the final state through the stored predecessors.
    for t in range(n - 2, -1, -1):
        best_path[t] = psi[t + 1, best_path[t + 1]]
    result = best_path + 1  # report states 1-based, as in the original
    print(result)
    return result

# Demo: uniform transition and initial probabilities, biased emissions.
uniform_A = np.full((3, 3), 0.333)
emit = np.asarray([[0.5, 0.5],
                   [0.75, 0.25],
                   [0.25, 0.75]])
uniform_Pi = np.full(3, 0.333)
obs_seq = np.asarray([0, 0, 0, 0, 1, 0, 1, 1, 1, 1])
Viterbi(uniform_A, emit, uniform_Pi, obs_seq)


你可能感兴趣的:(机器学习(十五)隐马尔科夫模型-未完待续)