Installing Basic Math Packages on a Mac

  • Preface
    • Quickly installing Python and the scientific computing packages
      • Install Python 3.10
      • Add environment variables
      • Scientific computing packages
        • cvxopt: convex optimization
        • Python Automation Test
        • Core scientific packages
      • SVM algorithm example
    • Installing PyTorch
      • Install the Xcode command-line tools
      • Install MiniForge
      • Install the scientific packages under MiniForge
      • PyTorch example
      • Check whether the GPU (CUDA) is available
    • References

Preface

I recently installed some basic math packages for Python 3 on a Mac, ran into a few problems, and am recording the fixes here for future reference.

Quickly installing Python and the scientific computing packages

Install Python 3.10

brew uninstall [email protected]
brew install [email protected]

Add environment variables

Append the following to your shell profile (for example ~/.zshrc) so they persist across sessions:

export PATH="/usr/local/opt/[email protected]/bin:$PATH"
export LDFLAGS="-L/usr/local/opt/[email protected]/lib"
export PKG_CONFIG_PATH="/usr/local/opt/[email protected]/lib/pkgconfig"
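
After reloading the shell (e.g. source ~/.zshrc), a quick check run with python3.10 confirms that the Homebrew interpreter is the one being picked up; a minimal sketch:

import sys
print(sys.version)       # expect a 3.10.x version string
print(sys.executable)    # expect a path under the Homebrew [email protected] installation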

Scientific computing packages

cvxopt: convex optimization

brew install suite-sparse
pip3 install cvxopt
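
Once installed, a tiny quadratic program can confirm that the solver works; the problem below (minimize x1^2 + x2^2 subject to x1 + x2 = 1, x >= 0) has the known solution (0.5, 0.5):

import numpy as np
import cvxopt

# minimize (1/2) x^T P x + q^T x  subject to  G x <= h,  A x = b
P = cvxopt.matrix(2.0 * np.eye(2))      # quadratic term
q = cvxopt.matrix(np.zeros(2))          # linear term
G = cvxopt.matrix(-np.eye(2))           # -x <= 0, i.e. x >= 0
h = cvxopt.matrix(np.zeros(2))
A = cvxopt.matrix(np.ones((1, 2)))      # x1 + x2 = 1
b = cvxopt.matrix(1.0)
sol = cvxopt.solvers.qp(P, q, G, h, A, b)
print(np.ravel(sol['x']))               # expected: [0.5, 0.5]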

Python Automation Test

pip3 install nose  # note: nose is unmaintained and has issues on Python 3.10; pytest or nose2 is the usual replacement
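
Whichever runner is used, it auto-discovers files and functions whose names start with test_; a minimal sketch (the file name test_sample.py is just an example):

# test_sample.py
def test_addition():
    assert 1 + 1 == 2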

Core scientific packages

pip3 install numpy
pip3 install scipy
pip3 install scikit-learn   # the PyPI package is "scikit-learn"; "sklearn" is a deprecated alias

# matplotlib
brew install libpng
brew install freetype
pip3 install matplotlib

# MPI
brew install openmpi
pip3 install mpi4py

# FFTW (Homebrew no longer supports per-formula options such as --with-mpi)
brew install fftw

# HDF5
brew install hdf5
pip3 install h5py
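
As a quick sanity check that everything imports, the sketch below prints each library's version (importing mpi4py alone does not start MPI):

# Smoke test: import each library and print its version
import numpy, scipy, sklearn, matplotlib, h5py, mpi4py
for mod in (numpy, scipy, sklearn, matplotlib, h5py, mpi4py):
    print(mod.__name__, mod.__version__)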

SVM algorithm example
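
The fit method below solves the hard-margin SVM dual: maximize Σ_i a_i − (1/2) Σ_i Σ_j a_i a_j y_i y_j <x_i, x_j> subject to a_i >= 0 and Σ_i a_i y_i = 0. In cvxopt's qp(P, q, G, h, A, b) form this becomes P[i, j] = y_i y_j <x_i, x_j>, q = −1, G = −I, h = 0, A = y^T and b = 0; the weight vector and intercept are then recovered from the support vectors (the points with non-zero multipliers).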

import numpy as np
import cvxopt
from sklearn.datasets import make_blobs
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.metrics import confusion_matrix

class SVM:
    def fit(self, X, y):
        n_samples, n_features = X.shape
        # Gram matrix K[i, j] = <x_i, x_j>
        K = np.zeros((n_samples, n_samples))
        for i in range(n_samples):
            for j in range(n_samples):
                K[i, j] = np.dot(X[i], X[j])
        # P_ij = y_i y_j <x_i, x_j>
        P = cvxopt.matrix(np.outer(y, y) * K)
        # q = -1 (N x 1)
        q = cvxopt.matrix(np.ones(n_samples) * -1)
        # A = y^T (1 x N)
        A = cvxopt.matrix(y, (1, n_samples))
        # b = 0
        b = cvxopt.matrix(0.0)
        # G = -I (N x N), so G a <= h enforces a_i >= 0
        G = cvxopt.matrix(np.diag(np.ones(n_samples) * -1))
        # h = 0 (N x 1)
        h = cvxopt.matrix(np.zeros(n_samples))
        solution = cvxopt.solvers.qp(P, q, G, h, A, b)
        # Lagrange multipliers
        a = np.ravel(solution['x'])
        # Support vectors have non-zero Lagrange multipliers
        sv = a > 1e-5
        ind = np.arange(len(a))[sv]
        self.a = a[sv]
        self.sv = X[sv]
        self.sv_y = y[sv]
        # Intercept
        self.b = 0
        for n in range(len(self.a)):
            self.b += self.sv_y[n]
            self.b -= np.sum(self.a * self.sv_y * K[ind[n], sv])
        self.b /= len(self.a)
        # Weights
        self.w = np.zeros(n_features)
        for n in range(len(self.a)):
            self.w += self.a[n] * self.sv_y[n] * self.sv[n]

    def project(self, X):
        return np.dot(X, self.w) + self.b

    def predict(self, X):
        return np.sign(self.project(X))

X, y = make_blobs(n_samples=250, centers=2,
                  random_state=0, cluster_std=0.60)
y[y == 0] = -1
tmp = np.ones(len(X))
y = tmp * y
plt.scatter(X[:, 0], X[:, 1], c=y, cmap='winter')
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

svm = SVM()
svm.fit(X_train, y_train)

def f(x, w, b, c=0):
    return (-w[0] * x - b + c) / w[1]

plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap='winter')
# w.x + b = 0
a0 = -4; a1 = f(a0, svm.w, svm.b)
b0 = 4; b1 = f(b0, svm.w, svm.b)
plt.plot([a0,b0], [a1,b1], 'k')
# w.x + b = 1
a0 = -4; a1 = f(a0, svm.w, svm.b, 1)
b0 = 4; b1 = f(b0, svm.w, svm.b, 1)
plt.plot([a0,b0], [a1,b1], 'k--')
# w.x + b = -1
a0 = -4; a1 = f(a0, svm.w, svm.b, -1)
b0 = 4; b1 = f(b0, svm.w, svm.b, -1)
plt.plot([a0,b0], [a1,b1], 'k--')

y_pred = svm.predict(X_test)
confusion_matrix(y_test, y_pred)

svc = LinearSVC()
svc.fit(X_train, y_train)


plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap='winter');
ax = plt.gca()
xlim = ax.get_xlim()
w = svc.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(xlim[0], xlim[1])
yy = a * xx - svc.intercept_[0] / w[1]
plt.plot(xx, yy)
yy = a * xx - (svc.intercept_[0] - 1) / w[1]
plt.plot(xx, yy, 'k--')
yy = a * xx - (svc.intercept_[0] + 1) / w[1]
plt.plot(xx, yy, 'k--')

y_pred = svc.predict(X_test)
confusion_matrix(y_test, y_pred)

Installing PyTorch

Install the Xcode command-line tools

xcode-select --install

Install MiniForge

Essentially, MiniForge is a conda installer comparable to Miniconda. One of its great advantages is its macOS support, including the Apple M1 devices. Download the Apple Silicon installer from the link below:

Download link: https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-MacOSX-arm64.sh

Save it to the ~/Downloads directory, then run the installer:

sh ~/Downloads/Miniforge3-MacOSX-arm64.sh
cd ~/miniforge3/bin
./conda create --name pytorch_m1 python=3.10
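# if "conda activate" reports that the shell is not configured for conda, run ./conda init "$(basename "$SHELL")" first and restart the terminal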
./conda activate pytorch_m1
cd ~/miniforge3/bin
./conda install -c pytorch pytorch
./conda install -c conda-forge openblas
./conda install -c conda-forge jupyter jupyterlab
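
With the pytorch_m1 environment active, a one-line check confirms the PyTorch install:

import torch
print(torch.__version__)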

Install the scientific packages under MiniForge

# note: run pip from inside the activated pytorch_m1 environment (or use
# ~/miniforge3/envs/pytorch_m1/bin/pip) if the packages should land in that
# environment rather than in the base one
cd ~/miniforge3/bin
./pip3 install cvxopt
./pip3 install scipy
./pip3 install scikit-learn
./pip3 install matplotlib

PyTorch example

# -*- coding: utf-8 -*-
import torch
import math


dtype = torch.float
device = torch.device("cpu")

# Create random input and output data
x = torch.linspace(-math.pi, math.pi, 2000, device=device, dtype=dtype)
y = torch.sin(x)

# Randomly initialize weights
a = torch.randn((), device=device, dtype=dtype)
b = torch.randn((), device=device, dtype=dtype)
c = torch.randn((), device=device, dtype=dtype)
d = torch.randn((), device=device, dtype=dtype)

learning_rate = 1e-6
for t in range(2000):
    # Forward pass: compute predicted y
    y_pred = a + b * x + c * x ** 2 + d * x ** 3

    # Compute and print loss
    loss = (y_pred - y).pow(2).sum().item()
    if t % 100 == 99:
        print(t, loss)

    # Backprop to compute gradients of a, b, c, d with respect to loss
    grad_y_pred = 2.0 * (y_pred - y)
    grad_a = grad_y_pred.sum()
    grad_b = (grad_y_pred * x).sum()
    grad_c = (grad_y_pred * x ** 2).sum()
    grad_d = (grad_y_pred * x ** 3).sum()

    # Update weights using gradient descent
    a -= learning_rate * grad_a
    b -= learning_rate * grad_b
    c -= learning_rate * grad_c
    d -= learning_rate * grad_d


print(f'Result: y = {a.item()} + {b.item()} x + {c.item()} x^2 + {d.item()} x^3')

Check whether the GPU (CUDA) is available

import torch
print(torch.cuda.is_available())
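
On Apple Silicon there is no CUDA device, so torch.cuda.is_available() returns False. The Apple-GPU path in PyTorch (1.12 and later, on macOS 12.3+) is the MPS backend, which can be checked and selected as in this sketch:

import torch

print(torch.backends.mps.is_available())   # True when the Apple GPU can be used
print(torch.backends.mps.is_built())       # True when this build of PyTorch includes MPS support

device = torch.device("mps" if torch.backends.mps.is_available() else "cpu")
x = torch.ones(3, device=device)
print(x.device)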

References

  1. Support Vector Machine Python Example, https://towardsdatascience.com/support-vector-machine-python-example-d67d9b63f1c8
  2. How to Install PyTorch on Apple M1-series, https://betterprogramming.pub/how-to-install-pytorch-on-apple-m1-series-512b3ad9bc6
  3. Setting up M1 Mac for both TensorFlow and PyTorch, https://naturale0.github.io/2021/01/29/setting-up-m1-mac-for-both-tensorflow-and-pytorch
  4. PyTorch QUICKSTART, https://pytorch.org/tutorials/beginner/basics/quickstart_tutorial.html
