python代码

python\main_script.py

from multiprocessing import Process
import subprocess
 
def call_script(args):
    """Launch two worker processes, each running script_to_call.py with args[0] and args[1].

    Both workers are started first and then joined, so they run concurrently.
    """
    workers = [Process(target=run_script, args=(args[0], args[1])) for _ in range(2)]
    for worker in workers:
        worker.start()
    # Block until both workers have finished.
    for worker in workers:
        worker.join()
 
 
def run_script(arg1, arg2):
    """Run script_to_call.py as a child process, passing both arguments on the command line.

    Raises:
        subprocess.CalledProcessError: if the child process exits non-zero (check=True).
    """
    import sys  # local import keeps this snippet self-contained

    # Use sys.executable so the child runs under the same interpreter as the
    # parent; a bare "python" may resolve to a different (or missing) binary,
    # especially inside virtual environments or on Windows.
    subprocess.run(
        [sys.executable, "script_to_call.py", str(arg1), str(arg2)],
        check=True,
    )
 
if __name__ == "__main__":
    # NOTE(review): only args[0] and args[1] are consumed by call_script;
    # the third value (7) is ignored.
    args = [4,6,7]
    call_script(args)

python\script_to_call.py

import time
import sys
 
def main(arg1, arg2):
    """Print the two received arguments, simulate 2 seconds of work, and return 0."""
    print(f"Arguments received: {arg1}, {arg2}")
    # Simulate some work
    time.sleep(2)
    print("Work done in script_to_call.py")
    return 0
 
if __name__ == "__main__":
    # Read the two inputs from the command line (when this script is run directly).
    # In this example the script is normally launched by the multiprocessing
    # wrapper rather than run by hand, so this section mainly demonstrates how
    # the arguments are received.
    arg1 = int(sys.argv[1])
    arg2 = int(sys.argv[2])
    result = main(arg1, arg2)
    print(f"Result from script_to_call.py: {result}")
神经网络——回归（用单隐藏层网络拟合 sin 函数）
import numpy as np
import matplotlib.pyplot as plt

# Training data: sample one full sine period and use sin(x) as the target.
input_data=np.arange(0,np.pi*2,0.1)
correct_data=np.sin(input_data)

input_data=(input_data-np.pi)/np.pi  # normalize inputs to roughly [-1, 1)
n_data=len(correct_data)  # number of training samples

epoch=2001
eta=0.1  # learning rate
interval=100  # epochs between progress plots/printouts

# Network dimensions: 1 input -> 3 hidden units -> 1 output.
n_in=1
n_mid=3
n_out=1

wb_width=0.01  # spread of the random initial weights and biases
class MiddleLayer:
    """Fully connected hidden layer with a sigmoid activation.

    The layer caches its input and output during forward() so that backward()
    can compute the gradients; update() applies one plain SGD step.
    """

    def __init__(self, n_upper, n, wb_width=0.01):
        # wb_width parameterizes the spread of the random initial weights;
        # the default matches the module-level constant used in this script,
        # so existing two-argument calls behave identically.
        self.w = wb_width * np.random.randn(n_upper, n)  # weights (n_upper, n)
        self.b = wb_width * np.random.randn(n)           # biases (n,)

    def forward(self, x):
        """Compute y = sigmoid(x @ w + b), caching x and y for backward()."""
        self.x = x
        u = np.dot(x, self.w) + self.b
        self.y = 1 / (1 + np.exp(-u))  # sigmoid activation

    def backward(self, grad_y):
        """Back-propagate grad_y; stores grad_w, grad_b and grad_x."""
        # Sigmoid derivative: dy/du = y * (1 - y)
        delta = grad_y * (1 - self.y) * self.y

        self.grad_w = np.dot(self.x.T, delta)
        self.grad_b = np.sum(delta, axis=0)

        # Gradient w.r.t. this layer's input, passed to the upstream layer.
        self.grad_x = np.dot(delta, self.w.T)

    def update(self, eta):
        """Apply one SGD step with learning rate eta."""
        self.w -= eta * self.grad_w
        self.b -= eta * self.grad_b

class OutputLayer:
    """Fully connected output layer with identity activation (linear regression head).

    backward() takes the target t directly, since with squared-error loss the
    output delta is simply (y - t).
    """

    def __init__(self, n_upper, n, wb_width=0.01):
        # wb_width parameterizes the spread of the random initial weights;
        # the default matches the module-level constant used in this script,
        # so existing two-argument calls behave identically.
        self.w = wb_width * np.random.randn(n_upper, n)  # weights (n_upper, n)
        self.b = wb_width * np.random.randn(n)           # biases (n,)

    def forward(self, x):
        """Compute y = x @ w + b (identity activation), caching x and y."""
        self.x = x
        u = np.dot(x, self.w) + self.b
        self.y = u

    def backward(self, t):
        """Back-propagate from target t; stores grad_w, grad_b and grad_x."""
        # With squared-error loss and identity activation, delta = y - t.
        delta = self.y - t

        self.grad_w = np.dot(self.x.T, delta)
        self.grad_b = np.sum(delta, axis=0)

        # Gradient w.r.t. this layer's input, passed to the hidden layer.
        self.grad_x = np.dot(delta, self.w.T)

    def update(self, eta):
        """Apply one SGD step with learning rate eta."""
        self.w -= eta * self.grad_w
        self.b -= eta * self.grad_b

# Build the 1 -> n_mid -> 1 network.
middle_layer=MiddleLayer(n_in,n_mid)
output_layer=OutputLayer(n_mid,n_out)

for i in range(epoch):
    # Shuffle the sample indices so each epoch visits the data in random order.
    index_random=np.arange(n_data)
    np.random.shuffle(index_random)

    # Accumulators for progress reporting (only filled on plotting epochs).
    total_error=0
    plt_x=[]
    plt_y=[]

    for idx in index_random:
        # Single-sample (online) training: slicing keeps a 1-element array.
        x=input_data[idx:idx+1]
        t=correct_data[idx:idx+1]

        # Forward pass
        middle_layer.forward(x.reshape(1,1))
        output_layer.forward(middle_layer.y)

        # Backward pass
        output_layer.backward(t.reshape(1,1))
        middle_layer.backward(output_layer.grad_x)

        # Update weights and biases
        middle_layer.update(eta)
        output_layer.update(eta)

        if i%interval==0:
            y=output_layer.y.reshape(-1)  # flatten the 1x1 matrix to a vector
            total_error+=1.0/2.0*np.sum(np.square(y-t))  # sum-of-squares error

            plt_x.append(x)
            plt_y.append(y)
    
    if i%interval==0:
        # Plot the target curve (dashed) against the network's predictions.
        plt.plot(input_data,correct_data,linestyle="dashed")
        plt.scatter(plt_x,plt_y,marker="+")
        print(plt_y)
        plt.show()

        # Mean error over the epoch's n_data samples.
        print("Epoch:"+str(i)+"/"+str(epoch),"Error:"+str(total_error/n_data))

你可能感兴趣的:(python,java,前端)