差分法求解矩形区域椭圆方程
$\Delta u = f,\quad x \in \Omega = [-1,1]\times[-1,1]$
$u = g,\quad x \in \partial\Omega$
在x轴和y轴上分别取分划长度为 dx, dy,令 $M = \mathrm{int}(2/dx) + 1$,$N = \mathrm{int}(2/dy) + 1$,则得到 $M \times N$ 个网格点。设 u 在网格点 $(x_i, y_j)$ 处的近似解为 $u_{i,j}$,则对于内部网格点,有:
$-\dfrac{u_{i+1,j} - 2u_{i,j} + u_{i-1,j}}{dx^2} - \dfrac{u_{i,j+1} - 2u_{i,j} + u_{i,j-1}}{dy^2} = f_{i,j}$
两边同时乘dxdy,则有:
$2\left(\dfrac{dy}{dx} + \dfrac{dx}{dy}\right)u_{i,j} - \dfrac{dy}{dx}\left(u_{i+1,j} + u_{i-1,j}\right) - \dfrac{dx}{dy}\left(u_{i,j+1} + u_{i,j-1}\right) = f_{i,j}\,dx\,dy$
def UU(X, order,prob):  # X holds the sample points (x, t), one per row
    """Exact solution u of the test problem, or one of its partial derivatives.

    X     : (n, 2) tensor; column 0 is x, column 1 is y (called t in comments).
    order : pair [ox, oy] — derivative order in x and in y:
            [0,0] -> u, [1,0] -> u_x, [0,2] -> u_yy, [1,1] -> u_xy, etc.
    prob  : selects one of three analytic test solutions (1, 2, 3).
    Returns a 1-D tensor of length n; falls through (returns None) for an
    unsupported (order, prob) combination.
    """
    if prob==1:
        # u = log(10*(x+y)^2 + (x-y)^2 + 0.5)
        temp = 10*(X[:,0]+X[:,1])**2 + (X[:,0]-X[:,1])**2 + 0.5
        if order[0]==0 and order[1]==0:
            return torch.log(temp)
        if order[0]==1 and order[1]==0:  # first partial derivative in x
            return temp**(-1) * (20*(X[:,0]+X[:,1]) + 2*(X[:,0]-X[:,1]))
        if order[0]==0 and order[1]==1:  # first partial derivative in t
            return temp**(-1) * (20*(X[:,0]+X[:,1]) - 2*(X[:,0]-X[:,1]))
        if order[0]==2 and order[1]==0:
            return - temp**(-2) * (20*(X[:,0]+X[:,1])+2*(X[:,0]-X[:,1])) ** 2 \
                   + temp**(-1) * (22)
        if order[0]==1 and order[1]==1:
            return - temp**(-2) * (20*(X[:,0]+X[:,1])+2*(X[:,0]-X[:,1])) \
                   * (20*(X[:,0]+X[:,1])-2*(X[:,0]-X[:,1])) \
                   + temp**(-1) * (18)
        if order[0]==0 and order[1]==2:
            return - temp**(-2) * (20*(X[:,0]+X[:,1])-2*(X[:,0]-X[:,1])) ** 2 \
                   + temp**(-1) * (22)
    if prob==2:
        # u = (x^3 - x) * cosh(2y), with cosh/sinh written out as exponentials
        if order[0]==0 and order[1]==0:
            return (X[:,0]*X[:,0]*X[:,0]-X[:,0]) * \
                   0.5*(torch.exp(2*X[:,1])+torch.exp(-2*X[:,1]))
        if order[0]==1 and order[1]==0:
            return (3*X[:,0]*X[:,0]-1) * \
                   0.5*(torch.exp(2*X[:,1])+torch.exp(-2*X[:,1]))
        if order[0]==0 and order[1]==1:
            return (X[:,0]*X[:,0]*X[:,0]-X[:,0]) * \
                   (torch.exp(2*X[:,1])-torch.exp(-2*X[:,1]))
        if order[0]==2 and order[1]==0:
            return (6*X[:,0]) * \
                   0.5*(torch.exp(2*X[:,1])+torch.exp(-2*X[:,1]))
        if order[0]==1 and order[1]==1:
            return (3*X[:,0]*X[:,0]-1) * \
                   (torch.exp(2*X[:,1])-torch.exp(-2*X[:,1]))
        if order[0]==0 and order[1]==2:
            return (X[:,0]*X[:,0]*X[:,0]-X[:,0]) * \
                   2*(torch.exp(2*X[:,1])+torch.exp(-2*X[:,1]))
    if prob==3:
        # u = (x^2 - y^2) / (x^2 + y^2 + 0.1); derivatives by product/chain rule
        temp1 = X[:,0]*X[:,0] - X[:,1]*X[:,1]
        temp2 = X[:,0]*X[:,0] + X[:,1]*X[:,1] + 0.1
        if order[0]==0 and order[1]==0:
            return temp1 * temp2**(-1)
        if order[0]==1 and order[1]==0:
            return (2*X[:,0]) * temp2**(-1) + \
                   temp1 * (-1)*temp2**(-2) * (2*X[:,0])
        if order[0]==0 and order[1]==1:
            return (-2*X[:,1]) * temp2**(-1) + \
                   temp1 * (-1)*temp2**(-2) * (2*X[:,1])
        if order[0]==2 and order[1]==0:
            return (2) * temp2**(-1) + \
                   2 * (2*X[:,0]) * (-1)*temp2**(-2) * (2*X[:,0]) + \
                   temp1 * (2)*temp2**(-3) * (2*X[:,0])**2 + \
                   temp1 * (-1)*temp2**(-2) * (2)
        if order[0]==1 and order[1]==1:
            return (2*X[:,0]) * (-1)*temp2**(-2) * (2*X[:,1]) + \
                   (-2*X[:,1]) * (-1)*temp2**(-2) * (2*X[:,0]) + \
                   temp1 * (2)*temp2**(-3) * (2*X[:,0]) * (2*X[:,1])
        if order[0]==0 and order[1]==2:
            return (-2) * temp2**(-1) + \
                   2 * (-2*X[:,1]) * (-1)*temp2**(-2) * (2*X[:,1]) + \
                   temp1 * (2)*temp2**(-3) * (2*X[:,1])**2 + \
                   temp1 * (-1)*temp2**(-2) * (2)
def FF(prob, X):
    """Right-hand side f = -u_xx - u_yy evaluated from the exact solution UU."""
    laplacian = UU(X, [2, 0], prob) + UU(X, [0, 2], prob)
    return -laplacian
class FD():
    """Finite-difference solver for -Δu = f on a rectangle with Dirichlet data.

    The grid has nx[0] points in x and nx[1] points in y (boundary included);
    unknowns are ordered row-major: node (i, j) -> index i*nx[1] + j.
    Assembles a dense system matrix and solves it with Gauss-Seidel iteration.
    """
    def __init__(self, bound, hx, prob):
        # bound: 2x2 tensor [[x0, x1], [y0, y1]]; hx: [dx, dy]; prob: test-case id
        self.prob = prob
        self.dim = 2
        self.hx = hx
        # number of grid points (boundary included) along each axis
        self.nx = [int((bound[0,1] - bound[0,0])/self.hx[0]) + 1,
                   int((bound[1,1] - bound[1,0])/self.hx[1]) + 1]
        self.size = self.nx[0]*self.nx[1]
        # coordinates of every node as a (size, 2) tensor, row-major in (i, j)
        self.X = torch.zeros(self.size, self.dim)
        m = 0
        for i in range(self.nx[0]):
            for j in range(self.nx[1]):
                self.X[m,0] = bound[0,0] + i*self.hx[0]
                self.X[m,1] = bound[1,0] + j*self.hx[1]
                m = m + 1
        # exact solution at every node, kept for error measurement
        self.u_acc = UU(self.X, [0,0], self.prob).view(-1,1)
    def matrix(self):
        """Assemble the dense matrix of the dx*dy-scaled 5-point stencil.

        Boundary rows are identity rows so that A u = b enforces u = g there.
        """
        self.A = torch.zeros(self.size, self.size)
        dx = self.hx[0]; dy = self.hx[1]  # constant over the grid — hoisted
        for i in range(self.nx[0]):
            for j in range(self.nx[1]):
                k = i*self.nx[1] + j  # row-major index of node (i, j)
                if i == 0 or i == self.nx[0] - 1 or j == 0 or j == self.nx[1] - 1:
                    self.A[k, k] = 1
                else:
                    self.A[k, k] = 2*(dx/dy + dy/dx)
                    self.A[k, k - 1] = -dx/dy              # neighbor (i, j-1)
                    self.A[k, k + 1] = -dx/dy              # neighbor (i, j+1)
                    self.A[k, k - self.nx[1]] = -dy/dx     # neighbor (i-1, j)
                    self.A[k, k + self.nx[1]] = -dy/dx     # neighbor (i+1, j)
        return self.A
    def right(self):
        """Assemble the right-hand side: g on the boundary, f*dx*dy inside."""
        self.b = torch.zeros(self.size, 1)
        dx = self.hx[0]; dy = self.hx[1]
        for i in range(self.nx[0]):
            for j in range(self.nx[1]):
                k = i*self.nx[1] + j
                if i == 0 or i == self.nx[0] - 1 or j == 0 or j == self.nx[1] - 1:
                    self.b[k] = UU(self.X[k:k + 1,:], [0,0], self.prob)
                else:
                    self.b[k] = FF(self.prob, self.X[k:k + 1,:])*dx*dy
        return self.b
    def solve(self):
        """Solve A u = b with 200 Gauss-Seidel sweeps; returns a numpy column.

        NOTE(review): GS (the Gauss-Seidel routine) and np are expected to be
        defined/imported elsewhere in this file — confirm before running.
        """
        A = self.matrix().numpy()
        b = self.right().numpy()
        x = np.zeros([b.shape[0], 1])
        u = GS(A, b, x, 200)
        return u
def error(u_pred,u_acc):
temp = ((u_pred - u_acc.numpy())**2).sum()/(u_acc.numpy()**2).sum()
return temp**(0.5)
# Driver: dense-matrix assembly + Gauss-Seidel solve on [0,2] x [0,1]
bound = torch.tensor([[0,2],[0,1]]).float()
hx = [0.1,0.1]      # grid spacing in x and y
prob = 3            # which analytic test solution to use
fd = FD(bound,hx,prob)
u_pred = fd.solve()
u_acc = fd.u_acc    # exact solution at the grid nodes
print(error(u_pred,u_acc))   # relative L2 error of the FD solution
上面这个方法最原始,同时最耗时,尤其是求解大规模矩阵的时候,矩阵的存储求解特别困难,上面是运用高斯迭代求解。基于这个,我们开始引入线高斯迭代和共轭梯度法,其中最重要的是共轭梯度法求解,这个速度非常惊人。
import numpy as np
import time
def TH(d, l, u, b):
    """Solve a tridiagonal system A x = b with the Thomas algorithm.

    d : (n, 1) main diagonal; l : (n-1, 1) sub-diagonal;
    u : (n-1, 1) super-diagonal; b : (n, 1) right-hand side.
    Returns x as an (n, 1) array. No pivoting is performed, so the matrix
    should be diagonally dominant (as the FD systems here are).
    """
    n = b.shape[0]
    # LU-style elimination: alpha is the modified diagonal, beta the
    # modified super-diagonal.
    alpha = np.zeros_like(d)
    beta = np.zeros_like(u)
    alpha[0, 0] = d[0, 0]
    beta[0, 0] = u[0, 0] / d[0, 0]
    for i in range(1, n - 1):
        alpha[i, 0] = d[i, 0] - l[i - 1, 0] * beta[i - 1, 0]
        beta[i, 0] = u[i, 0] / alpha[i, 0]
    alpha[n - 1, 0] = d[n - 1, 0] - l[n - 2, 0] * beta[n - 2, 0]
    # forward substitution: L y = b
    y = np.zeros([n, 1])
    y[0, 0] = b[0, 0] / alpha[0, 0]
    for i in range(1, n):
        y[i, 0] = (b[i, 0] - l[i - 1, 0] * y[i - 1, 0]) / alpha[i, 0]
    # back substitution: U x = y
    x = np.zeros([n, 1])
    x[n - 1, 0] = y[n - 1, 0]
    for j in range(n - 2, -1, -1):
        x[j, 0] = y[j, 0] - beta[j, 0] * x[j + 1, 0]
    return x
def UU(X, order,prob):  # X holds the sample points (x, t), one per row
    """Exact solution u of the test problem, or one of its partial derivatives.

    NumPy version of the function defined earlier with torch.
    X     : (n, 2) array; column 0 is x, column 1 is y (called t in comments).
    order : pair [ox, oy] — derivative order in x and in y.
    prob  : selects one of three analytic test solutions (1, 2, 3).
    Returns a 1-D array of length n; falls through (returns None) for an
    unsupported (order, prob) combination.
    """
    if prob==1:
        # u = log(10*(x+y)^2 + (x-y)^2 + 0.5)
        temp = 10*(X[:,0]+X[:,1])**2 + (X[:,0]-X[:,1])**2 + 0.5
        if order[0]==0 and order[1]==0:
            return np.log(temp)
        if order[0]==1 and order[1]==0:  # first partial derivative in x
            return temp**(-1) * (20*(X[:,0]+X[:,1]) + 2*(X[:,0]-X[:,1]))
        if order[0]==0 and order[1]==1:  # first partial derivative in t
            return temp**(-1) * (20*(X[:,0]+X[:,1]) - 2*(X[:,0]-X[:,1]))
        if order[0]==2 and order[1]==0:
            return - temp**(-2) * (20*(X[:,0]+X[:,1])+2*(X[:,0]-X[:,1])) ** 2 \
                   + temp**(-1) * (22)
        if order[0]==1 and order[1]==1:
            return - temp**(-2) * (20*(X[:,0]+X[:,1])+2*(X[:,0]-X[:,1])) \
                   * (20*(X[:,0]+X[:,1])-2*(X[:,0]-X[:,1])) \
                   + temp**(-1) * (18)
        if order[0]==0 and order[1]==2:
            return - temp**(-2) * (20*(X[:,0]+X[:,1])-2*(X[:,0]-X[:,1])) ** 2 \
                   + temp**(-1) * (22)
    if prob==2:
        # u = (x^3 - x) * cosh(2y), with cosh/sinh written out as exponentials
        if order[0]==0 and order[1]==0:
            return (X[:,0]*X[:,0]*X[:,0]-X[:,0]) * \
                   0.5*(np.exp(2*X[:,1])+np.exp(-2*X[:,1]))
        if order[0]==1 and order[1]==0:
            return (3*X[:,0]*X[:,0]-1) * \
                   0.5*(np.exp(2*X[:,1])+np.exp(-2*X[:,1]))
        if order[0]==0 and order[1]==1:
            return (X[:,0]*X[:,0]*X[:,0]-X[:,0]) * \
                   (np.exp(2*X[:,1])-np.exp(-2*X[:,1]))
        if order[0]==2 and order[1]==0:
            return (6*X[:,0]) * \
                   0.5*(np.exp(2*X[:,1])+np.exp(-2*X[:,1]))
        if order[0]==1 and order[1]==1:
            return (3*X[:,0]*X[:,0]-1) * \
                   (np.exp(2*X[:,1])-np.exp(-2*X[:,1]))
        if order[0]==0 and order[1]==2:
            return (X[:,0]*X[:,0]*X[:,0]-X[:,0]) * \
                   2*(np.exp(2*X[:,1])+np.exp(-2*X[:,1]))
    if prob==3:
        # u = (x^2 - y^2) / (x^2 + y^2 + 0.1); derivatives by product/chain rule
        temp1 = X[:,0]*X[:,0] - X[:,1]*X[:,1]
        temp2 = X[:,0]*X[:,0] + X[:,1]*X[:,1] + 0.1
        if order[0]==0 and order[1]==0:
            return temp1 * temp2**(-1)
        if order[0]==1 and order[1]==0:
            return (2*X[:,0]) * temp2**(-1) + \
                   temp1 * (-1)*temp2**(-2) * (2*X[:,0])
        if order[0]==0 and order[1]==1:
            return (-2*X[:,1]) * temp2**(-1) + \
                   temp1 * (-1)*temp2**(-2) * (2*X[:,1])
        if order[0]==2 and order[1]==0:
            return (2) * temp2**(-1) + \
                   2 * (2*X[:,0]) * (-1)*temp2**(-2) * (2*X[:,0]) + \
                   temp1 * (2)*temp2**(-3) * (2*X[:,0])**2 + \
                   temp1 * (-1)*temp2**(-2) * (2)
        if order[0]==1 and order[1]==1:
            return (2*X[:,0]) * (-1)*temp2**(-2) * (2*X[:,1]) + \
                   (-2*X[:,1]) * (-1)*temp2**(-2) * (2*X[:,0]) + \
                   temp1 * (2)*temp2**(-3) * (2*X[:,0]) * (2*X[:,1])
        if order[0]==0 and order[1]==2:
            return (-2) * temp2**(-1) + \
                   2 * (-2*X[:,1]) * (-1)*temp2**(-2) * (2*X[:,1]) + \
                   temp1 * (2)*temp2**(-3) * (2*X[:,1])**2 + \
                   temp1 * (-1)*temp2**(-2) * (2)
def FF(prob, X):
    """Right-hand side f = -u_xx - u_yy evaluated from the exact solution UU."""
    laplacian = UU(X, [2, 0], prob) + UU(X, [0, 2], prob)
    return -laplacian
np.random.seed(1234)  # fix the global NumPy RNG seed so runs are reproducible
class FD():
    """Line Gauss-Seidel solver for -Δu = f on a rectangle with Dirichlet data.

    Each sweep updates one grid column at a time; a column update solves a
    tridiagonal system with the Thomas algorithm TH, so the full system
    matrix is never assembled.
    """
    def __init__(self,bound,hx,prob):
        # bound: 2x2 array [[x0, x1], [y0, y1]]; hx: [dx, dy]; prob: test-case id
        self.prob = prob
        self.dim = 2
        self.hx = hx
        # number of grid points (boundary included) along each axis
        self.nx = [int((bound[0,1] - bound[0,0])/self.hx[0]) + 1,int((bound[1,1] - bound[1,0])/self.hx[1]) + 1]
        self.size = self.nx[0]*self.nx[1]
        # coordinates of every node, row-major in (i, j)
        self.X = np.zeros([self.size,self.dim])
        m = 0
        for i in range(self.nx[0]):
            for j in range(self.nx[1]):
                self.X[m,0] = bound[0,0] + i*self.hx[0]
                self.X[m,1] = bound[1,0] + j*self.hx[1]
                m = m + 1
    def u_init(self):
        """Initial guess: exact Dirichlet values on the boundary, zeros inside."""
        u = np.zeros([self.nx[0],self.nx[1]])
        x = self.X.reshape([self.nx[0],self.nx[1],self.dim])
        u[0,:] = UU(x[0,:,:],[0,0],self.prob)       # edge i = 0
        u[-1,:] = UU(x[-1,:,:],[0,0],self.prob)     # edge i = M-1
        u[1:self.nx[0] - 1,0] = UU(x[1:self.nx[0] - 1,0,:],[0,0],self.prob)    # edge j = 0
        u[1:self.nx[0] - 1,-1] = UU(x[1:self.nx[0] - 1,-1,:],[0,0],self.prob)  # edge j = N-1
        return u
    def solve(self,rig):
        """Line Gauss-Seidel iteration for the dx*dy-scaled 5-point system.

        rig : f values at the interior nodes, reshapeable to (M-2, N-2).
        Returns the solution on the full grid as an (M*N, 1) column vector.
        """
        tic = time.time()
        u_old = self.u_init()
        u_new = u_old.copy()
        M = self.nx[0];N = self.nx[1]
        dx = self.hx[0];dy = self.hx[1]
        right = (dx*dy*rig).reshape([M - 2,N - 2])
        # absorb the i = 0 and i = M-1 Dirichlet rows into the RHS; the j-side
        # boundary columns enter through u[.., j-1] / u[.., j+1] in the sweep
        right[0,:] += u_new[0,1:N - 1]*dy/dx
        right[-1,:] += u_new[- 1,1:N - 1]*dy/dx
        r1 = dy/dx;r2 = dx/dy
        # tridiagonal coefficients of one grid column (same for every column):
        # -r1 on the off-diagonals, 2*(r1 + r2) on the diagonal
        l = - np.ones([M - 3,1])*r1
        u = - np.ones([M - 3,1])*r1
        d = np.ones([M - 2,1])*2*(r1 + r2)
        for k in range(1000):
            for j in range(1,N - 1):
                # Gauss-Seidel in j: new values from column j-1, old from j+1
                b = r2*(u_new[1:M - 1,j - 1] + u_old[1:M - 1,j + 1]) + right[:,j - 1]
                u_new[1:M - 1,j:j + 1] = TH(d,l,u,b.reshape(-1,1))
            # stop once one full sweep changes the iterate by less than 1e-7
            if (np.linalg.norm(u_new - u_old) < 1e-7):
                break
            else:
                u_old = u_new.copy()
            if k%100 == 0:
                print('the iteration = %d'%(k + 1))
        ela = time.time() - tic
        print('the end iteration is %d,the time:%.2f'%(k + 1,ela))
        return u_new.reshape(-1,1)
# Driver: line Gauss-Seidel solve on [0,2] x [0,1] with spacing 0.05
bound = np.array([[0,2.0],[0,1.0]])
hx = [0.05,0.05]
prob = 1
fd = FD(bound,hx,prob)
M = fd.nx[0];N = fd.nx[1]
X = fd.X
u_acc = UU(X,[0,0],prob).reshape(-1,1)                 # exact solution on the grid
rig_in = (FF(prob,X).reshape(M,N))[1:M - 1,1:N - 1]    # f at interior nodes only
u_pred = fd.solve(rig_in)
print(max(abs(u_acc - u_pred)))                        # max-norm error
上面这个线高斯迭代避免了矩阵的生成,可以有效加快速度,但是还是太慢了,不能求解大规模问题。下面重点看共轭梯度法,这个的求解速度惊人,可以有效求解大规模问题,同时避免系数矩阵的存储和参与运算。
import torch
import numpy as np
import time
def UU(X, order,prob):  # X holds the sample points (x, t), one per row
    """Exact solution u of test problem 1, or one of its partial derivatives.

    Unlike the earlier versions, this copy implements prob == 1 only:
    u = log(10*(x+y)^2 + (x-y)^2 + 0.5).
    X     : (n, 2) array; column 0 is x, column 1 is y (called t in comments).
    order : pair [ox, oy] — derivative order in x and in y.
    Returns a 1-D array of length n; returns None for other prob values.
    """
    if prob==1:
        temp = 10*(X[:,0]+X[:,1])**2 + (X[:,0]-X[:,1])**2 + 0.5
        if order[0]==0 and order[1]==0:
            return np.log(temp)
        if order[0]==1 and order[1]==0:  # first partial derivative in x
            return temp**(-1) * (20*(X[:,0]+X[:,1]) + 2*(X[:,0]-X[:,1]))
        if order[0]==0 and order[1]==1:  # first partial derivative in t
            return temp**(-1) * (20*(X[:,0]+X[:,1]) - 2*(X[:,0]-X[:,1]))
        if order[0]==2 and order[1]==0:
            return - temp**(-2) * (20*(X[:,0]+X[:,1])+2*(X[:,0]-X[:,1])) ** 2 \
                   + temp**(-1) * (22)
        if order[0]==1 and order[1]==1:
            return - temp**(-2) * (20*(X[:,0]+X[:,1])+2*(X[:,0]-X[:,1])) \
                   * (20*(X[:,0]+X[:,1])-2*(X[:,0]-X[:,1])) \
                   + temp**(-1) * (18)
        if order[0]==0 and order[1]==2:
            return - temp**(-2) * (20*(X[:,0]+X[:,1])-2*(X[:,0]-X[:,1])) ** 2 \
                   + temp**(-1) * (22)
def FF(prob, X):
    """Right-hand side f = -u_xx - u_yy evaluated from the exact solution UU."""
    laplacian = UU(X, [2, 0], prob) + UU(X, [0, 2], prob)
    return -laplacian
class MG():
    """Matrix-free solvers for -Δu = f on a rectangle with Dirichlet data.

    The system matrix is never assembled: matrix-vector products are applied
    through the 5-point stencil directly on 2-D grid arrays. `residual`
    implements conjugate gradients; `GS` implements pointwise Gauss-Seidel.
    """
    def __init__(self, bound, prob):
        # bound: 2x2 array [[x0, x1], [y0, y1]]; prob: test-problem id for UU
        self.prob = prob
        self.dim = 2
        self.bound = bound
    def u_init(self, hx):
        """Initial guess on the grid of spacing hx = [dx, dy]: exact Dirichlet
        values on the boundary, zeros in the interior."""
        nx = [int((self.bound[0,1] - self.bound[0,0])/hx[0]) + 1,
              int((self.bound[1,1] - self.bound[1,0])/hx[1]) + 1]
        size = nx[0]*nx[1]
        # node coordinates, row-major in (i, j)
        X = np.zeros([size, self.dim])
        m = 0
        for i in range(nx[0]):
            for j in range(nx[1]):
                X[m,0] = self.bound[0,0] + i*hx[0]
                X[m,1] = self.bound[1,0] + j*hx[1]
                m = m + 1
        x = X.reshape([nx[0], nx[1], self.dim])
        u = np.zeros([nx[0], nx[1]])
        u[0,:] = UU(x[0,:,:], [0,0], self.prob)       # edge i = 0
        u[-1,:] = UU(x[-1,:,:], [0,0], self.prob)     # edge i = M-1
        u[1:nx[0] - 1,0] = UU(x[1:nx[0] - 1,0,:], [0,0], self.prob)    # edge j = 0
        u[1:nx[0] - 1,-1] = UU(x[1:nx[0] - 1,-1,:], [0,0], self.prob)  # edge j = N-1
        return u
    def residual(self, u, rig, epoch):
        """Matrix-free conjugate gradients for the dx*dy-scaled 5-point system.

        u     : full-grid array with the boundary values already set.
        rig   : full-grid f values whose boundary entries are zero, so the
                Dirichlet rows are never touched by the iteration.
        epoch : maximum number of CG iterations.
        Returns (u, res): the updated grid and the final residual grid.
        """
        tic = time.time()
        nx = [rig.shape[0], rig.shape[1]]
        # the grid spacing is implied by the shape of rig
        hx = [(self.bound[0,1] - self.bound[0,0])/(nx[0] - 1),
              (self.bound[1,1] - self.bound[1,0])/(nx[1] - 1)]
        M = nx[0]; N = nx[1]
        r1 = hx[1]/hx[0]; r2 = hx[0]/hx[1]
        rig = rig*hx[0]*hx[1]   # RHS of the dx*dy-scaled equations (local rebind)
        # initial residual res = b - A u on interior rows; boundary rows stay 0
        res = np.zeros_like(rig)
        for j in range(1, N - 1):
            res[1:M - 1,j] = rig[1:M - 1,j] - 2*(r1 + r2)*u[1:M - 1,j] + \
                             r2*(u[1:M - 1,j - 1] + u[1:M - 1,j + 1]) + \
                             r1*(u[0:M - 2,j] + u[2:M,j])
        rho = (res*res).sum()
        k = 0
        p = np.zeros_like(res)
        alpha = 0.0  # fix: keep `alpha` defined even if the loop never runs
        while np.sqrt(rho) > 1e-10 and (k < epoch):
            k = k + 1
            if k == 1:
                p = res.copy()
            else:
                beta = rho/rho_h
                p = res + beta*p
            # w = A p, applied via the stencil (no matrix is ever stored)
            w = np.zeros_like(res)
            for j in range(1, N - 1):
                w[1:M - 1,j] = - r2*(p[1:M - 1,j - 1] + p[1:M - 1,j + 1]) - \
                               r1*(p[0:M - 2,j] + p[2:M,j]) + 2*(r1 + r2)*p[1:M - 1,j]
            alpha = rho/(p*w).sum()
            u = u + alpha*p
            res = res - alpha*w
            rho_h = rho
            rho = (res*res).sum()
        ela = time.time() - tic
        print('the end iteration:%d,the residual:%.3e,the time:%.2f'%(k, rho, ela), np.linalg.norm(alpha*p))
        return u, res
    def GS(self, u, rig, epoch):
        """Pointwise Gauss-Seidel smoothing, `epoch` full sweeps.

        WARNING: `rig` is overwritten in place with the residual of the
        smoothed iterate. Returns (u_new, rig).
        """
        tic = time.time()
        nx = [rig.shape[0], rig.shape[1]]
        hx = [(self.bound[0,1] - self.bound[0,0])/(nx[0] - 1),
              (self.bound[1,1] - self.bound[1,0])/(nx[1] - 1)]
        u_new = u.copy()
        M = nx[0]; N = nx[1]
        r1 = hx[1]/hx[0]; r2 = hx[0]/hx[1]
        k = -1  # fix: keep `k` defined for the report below when epoch == 0
        for k in range(epoch):
            for i in range(1, M - 1):
                for j in range(1, N - 1):
                    # new values from (i-1, j) and (i, j-1), old elsewhere
                    u_new[i,j] = (hx[0]*hx[1]*rig[i,j] + r1*(u_new[i - 1,j] + u[i + 1,j]) + \
                                  r2*(u_new[i,j - 1] + u[i,j + 1]))/(2*r1 + 2*r2)
            u = u_new.copy()
        # residual of the smoothed iterate, written back into rig
        for j in range(1, N - 1):
            rig[1:M - 1,j] = hx[0]*hx[1]*rig[1:M - 1,j] - 2*(r1 + r2)*u_new[1:M - 1,j] + \
                             r2*(u_new[1:M - 1,j - 1] + u_new[1:M - 1,j + 1]) + \
                             r1*(u_new[0:M - 2,j] + u_new[2:M,j])
        ela = time.time() - tic
        print('the end iteration:%d,the time:%.2f'%(k, ela))
        return u_new, rig
# Driver: build the grid, assemble f, and solve with matrix-free CG.
bound = np.array([[0,4.0],[0,4.0]])
prob = 1
mg = MG(bound,prob)
hx = [1/64,1/64]
nx = [int((bound[0,1] - bound[0,0])/hx[0]) + 1,int((bound[1,1] - bound[1,0])/hx[1]) + 1]
size = nx[0]*nx[1]
# node coordinates, row-major in (i, j): x varies slowest, y fastest
xv = bound[0,0] + np.arange(nx[0])*hx[0]
yv = bound[1,0] + np.arange(nx[1])*hx[1]
X = np.stack([np.repeat(xv, nx[1]), np.tile(yv, nx[0])], axis=1)
u_acc = UU(X,[0,0],prob).reshape(-1,1)
rig = FF(prob,X).reshape([nx[0],nx[1]])
print(rig.shape)
# zero the boundary entries of f so the Dirichlet rows stay untouched by CG
rig[0,:] = 0
rig[-1,:] = 0
rig[1:nx[0] - 1,0] = 0
rig[1:nx[0] - 1,-1] = 0
epoch = 2*size
u_pred,res = mg.residual(mg.u_init(hx),rig,epoch)
print(max(abs(u_acc - u_pred.reshape(-1,1))))