A General Classifier Based on Mini-Batch Stochastic Gradient Descent

1. Method Source Code

'''
The General Classifier Based on Mini-Batch Stochastic Gradient Descent.
'''

import numpy as np
import random

'''
Data Constraint: numpy.ndarray.
Test Data Scale: 2683*6603.
Test Time Evaluation: runs quickly at the scale above.
Design Style: Scikit-Learn, PyTorch.
Train Mode: Mini-Batch.
'''

class Classifier:
    def __init__(self, epochs: int=200, batch_size: int=16, lr: float=0.1):
        '''
        :param epochs: maximum number of training epochs, default: int=200.
        :param batch_size: batch size, default: int=16.
        :param lr: learning rate, default: float=0.1.

        '''
        self.epochs = epochs
        self.lr = lr
        self.batch_size = batch_size
    
    def __data_matrix(self, X):
        '''
        :param X: data to be converted to an augmented matrix (a leading column of ones is added as the bias term), numpy.ndarray.

        '''
        ones = np.ones(X.shape[0])
        return np.insert(X, 0, ones, axis=1)
    
    def __softmax(self, part):
        '''
        :param part: batch of logits (features @ weights), numpy.ndarray.

        '''
        # subtract the row-wise max for numerical stability, then
        # normalize each row so the class probabilities sum to 1
        part = part - np.max(part, axis=1, keepdims=True)
        exp = np.exp(part)
        return exp / np.sum(exp, axis=1, keepdims=True)

    def __data_iter(self, X, y):
        '''
        :param X: features, numpy.ndarray.
        :param y: labels, numpy.ndarray.

        '''
        num_examples = len(X)
        indices = list(range(num_examples))
        random.shuffle(indices)
        for index in range(0, num_examples, self.batch_size):
            batch_indices = np.array(indices[index: min(index + self.batch_size, num_examples)])
            yield X[batch_indices], y[batch_indices]
    
    def fit(self, X, y, console: int=100, decay: int=20) -> None:
        '''
        :param X: train data, numpy.ndarray.
        :param y: correct labels, numpy.ndarray.
        :param console: console output interval (in steps), default: int=100.
        :param decay: learning rate decay interval (in epochs), default: int=20.

        '''
        assert len(X.shape) == 2, 'X must be 2-D, e.g. shape (n_samples, n_features)'
        assert len(y.shape) == 1, 'y must be 1-D, e.g. shape (n_samples,)'
        augmented, unique = self.__data_matrix(X), np.unique(y)
        self.num_classes = len(unique)
        self.classes = unique  # keep the original label values so predict/score can map back
        indices = dict(zip(unique, range(self.num_classes)))
        self.weights = np.zeros((augmented.shape[1], self.num_classes), dtype=np.float64)
        for epoch in range(self.epochs):
            for step, (features, labels) in enumerate(self.__data_iter(augmented, y)):
                res = self.__softmax(features @ self.weights)
                obj = np.eye(self.num_classes)[[indices[value] for value in labels]]
                err = res - obj
                self.weights -= self.lr * (features.T @ err) / self.batch_size
                if (step + 1) % console == 0:
                    print('Epoch [{}/{}], Step [{}/{}], Error Norm {:.4f}'.format(
                        epoch + 1, self.epochs, step + 1, len(X) // self.batch_size, np.linalg.norm(err)))
            # learning rate decay
            if (epoch + 1) % decay == 0:
                self.lr /= 3 # replace it with another schedule.
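                # A hypothetical alternative schedule (an assumption, not part of the original):
                # inverse-time decay, which shrinks the rate more gradually than dividing by 3, e.g.
                # self.lr = self.lr0 / (1.0 + 0.1 * (epoch + 1))  # requires saving self.lr0 = lr in __init__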
                
    def score(self, X, y) -> float:
        '''
        :param X: test data, numpy.ndarray.
        :param y: correct labels, numpy.ndarray.

        '''
        assert len(X.shape) == 2, 'X must be 2-D, e.g. shape (n_samples, n_features)'
        assert len(y.shape) == 1, 'y must be 1-D, e.g. shape (n_samples,)'
        X = self.__data_matrix(X)
        prob = X @ self.weights
        # map the predicted column indices back to the original label values before comparing
        predicted = self.classes[np.argmax(prob, axis=1)]
        return (predicted == y).sum() / len(X)
    
    def predict(self, X):
        '''
        :param X: predict data, numpy.ndarray.

        '''
        assert len(X.shape) == 2, 'X must be 2-D, e.g. shape (n_samples, n_features)'
        X = self.__data_matrix(X)
        prob = X @ self.weights
        # return the original label values rather than the internal column indices
        return self.classes[np.argmax(prob, axis=1)]
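
A minimal sanity check on synthetic two-class Gaussian data; this snippet and its data are illustrative only (they are not part of the original test program, which follows in section 2) and assume the Classifier class above is defined in the same file:

if __name__ == '__main__':
    # two well-separated Gaussian blobs with 5 features each (synthetic data)
    rng = np.random.default_rng(0)
    X = np.vstack([rng.normal(-2.0, 1.0, size=(100, 5)),
                   rng.normal(2.0, 1.0, size=(100, 5))])
    y = np.array([0] * 100 + [1] * 100)

    clf = Classifier(epochs=50, batch_size=16, lr=0.1)
    clf.fit(X, y)
    print(clf.score(X, y))  # expected to be close to 1.0 on separable data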

2. Test Program

# import packages and the model (the Classifier class above is assumed to be saved as ml.py)
import numpy as np
import scipy.io as sio
from ml import Classifier

# load data
def process_data(url: str) -> tuple:
    # read the .mat file and return the feature matrix 'X' and the label matrix 'class'
    data = sio.loadmat(url)
    return data['X'], data['class']

# get features and labels
features, labels = process_data('textretrieval.mat')

# split data
def pretreat(features, labels) -> tuple:
    # convert the one-hot label matrix to class indices, then split at sample 2000
    labels = np.argwhere(labels == 1)[:, 1]
    return features[:2000, :], features[2000:, :], labels[:2000], labels[2000:]

# get split data
X_train, X_test, y_train, y_test = pretreat(features, labels)

'''
hyper-parameters:
- epochs: 200
- batch_size: 1
- lr: 0.1
'''
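
Note that with batch_size=1 each update uses a single sample, so training reduces to plain stochastic gradient descent. An illustrative mini-batch alternative (an assumption, not the settings used in this test) would be:

# illustrative alternative: 16 samples per update step
# model = Classifier(epochs=200, batch_size=16, lr=0.1)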

# train model
model = Classifier(epochs=200, batch_size=1, lr=0.1)
model.fit(X_train, y_train)

# evaluate model
print(model.score(X_test, y_test))  # accuracy on the held-out split

# predict result
print((y_test == model.predict(X_test)).sum() / len(X_test))
