WorkflowSimBasicExample1 Code

The original WorkflowSimBasicExample1 source from WorkflowSim 1.0 is shown first, followed by an annotated copy (WorkflowSimBasicExampleTest) with the comments expanded.

/*
 * Copyright 2012-2013 University Of Southern California
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package org.workflowsim.examples;

import java.io.File;
import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.LinkedList;
import java.util.List;
import org.cloudbus.cloudsim.Cloudlet;
import org.cloudbus.cloudsim.CloudletSchedulerSpaceShared;
import org.cloudbus.cloudsim.DatacenterCharacteristics;
import org.cloudbus.cloudsim.HarddriveStorage;
import org.cloudbus.cloudsim.Host;
import org.cloudbus.cloudsim.Log;
import org.cloudbus.cloudsim.Pe;
import org.cloudbus.cloudsim.Storage;
import org.cloudbus.cloudsim.VmAllocationPolicySimple;
import org.cloudbus.cloudsim.VmSchedulerTimeShared;
import org.cloudbus.cloudsim.core.CloudSim;
import org.cloudbus.cloudsim.provisioners.BwProvisionerSimple;
import org.cloudbus.cloudsim.provisioners.PeProvisionerSimple;
import org.cloudbus.cloudsim.provisioners.RamProvisionerSimple;
import org.workflowsim.CondorVM;
import org.workflowsim.Task;
import org.workflowsim.WorkflowDatacenter;
import org.workflowsim.Job;
import org.workflowsim.WorkflowEngine;
import org.workflowsim.WorkflowPlanner;
import org.workflowsim.utils.ClusteringParameters;
import org.workflowsim.utils.OverheadParameters;
import org.workflowsim.utils.Parameters;
import org.workflowsim.utils.ReplicaCatalog;
import org.workflowsim.utils.Parameters.ClassType;

/**
 * This WorkflowSimExample creates a workflow planner, a workflow engine,
 * one scheduler, one data center, and 20 VMs. You should at least change
 * daxPath. You may change other parameters as well.
 *
 * @author Weiwei Chen
 * @since WorkflowSim Toolkit 1.0
 * @date Apr 9, 2013
 */
public class WorkflowSimBasicExample1 {
	
    protected static List<CondorVM> createVM(int userId, int vms) {
        //Creates a container to store VMs. This list is passed to the broker later
        LinkedList<CondorVM> list = new LinkedList<>();

        //VM Parameters
        
        long size = 10000; //image size (MB)
        int ram = 512; //vm memory (MB)
        int mips = 1000;
        long bw = 1000;
        int pesNumber = 1; //number of cpus
        String vmm = "Xen"; //VMM name

        //create VMs
        CondorVM[] vm = new CondorVM[vms];
        for (int i = 0; i < vms; i++) {
            double ratio = 1.0;
            vm[i] = new CondorVM(i, userId, mips * ratio, pesNumber, ram, bw, size, vmm, new CloudletSchedulerSpaceShared());
            list.add(vm[i]);
        }
        return list;
    }

    // STATIC METHODS ///
    /**
     * Creates main() to run this example. This example has only one datacenter
     * and one storage.
     */
    public static void main(String[] args) {
        try {
            // First step: Initialize the WorkflowSim package. 
            /**
             * Note that the actual number of VMs created may be smaller than
             * vmNum if the data center or its hosts do not have sufficient
             * resources. Take care.
             */
            int vmNum = 20;//number of vms;
            /**
             * Should change this based on real physical path
             */
            
            //String daxPath = "/Users/weiweich/NetBeansProjects/WorkflowSim-1.0/config/dax/Montage_100.xml";
            String daxPath = "F:\\WorkFlowSim\\WorkflowSim-1.0-master\\config\\dax\\Montage_100.xml";
            File daxFile = new File(daxPath);
            if (!daxFile.exists()) {
                Log.printLine("Warning: Please replace daxPath with the physical path in your working environment!");
                return;
            }

            /**
             * Since we are using MINMIN scheduling algorithm, the planning
             * algorithm should be INVALID such that the planner would not
             * override the result of the scheduler
             */
            Parameters.SchedulingAlgorithm sch_method = Parameters.SchedulingAlgorithm.MINMIN;
            Parameters.PlanningAlgorithm pln_method = Parameters.PlanningAlgorithm.INVALID;
            ReplicaCatalog.FileSystem file_system = ReplicaCatalog.FileSystem.SHARED;

            /**
             * No overheads
             */
            OverheadParameters op = new OverheadParameters(0, null, null, null, null, 0);

            /**
             * No Clustering
             */
            ClusteringParameters.ClusteringMethod method = ClusteringParameters.ClusteringMethod.NONE;
            ClusteringParameters cp = new ClusteringParameters(0, 0, method, null);

            /**
             * Initialize static parameters
             */
            Parameters.init(vmNum, daxPath, null,
                    null, op, cp, sch_method, pln_method,
                    null, 0);
            ReplicaCatalog.init(file_system);

            // These parameters must be set before creating any entities.
            int num_user = 1;   // number of grid users
            Calendar calendar = Calendar.getInstance();
            boolean trace_flag = false;  // whether to trace events

            // Initialize the CloudSim library
            CloudSim.init(num_user, calendar, trace_flag);

            WorkflowDatacenter datacenter0 = createDatacenter("Datacenter_0");

            /**
             * Create a WorkflowPlanner with one scheduler.
             */
            WorkflowPlanner wfPlanner = new WorkflowPlanner("planner_0", 1);
            /**
             * Create a WorkflowEngine.
             */
            WorkflowEngine wfEngine = wfPlanner.getWorkflowEngine();
            /**
             * Create a list of VMs. The userId of a VM is basically the id of
             * the scheduler that controls this vm.
             */
            List<CondorVM> vmlist0 = createVM(wfEngine.getSchedulerId(0), Parameters.getVmNum());

            /**
             * Submits this list of vms to this WorkflowEngine.
             */
            wfEngine.submitVmList(vmlist0, 0);

            /**
             * Binds the data centers with the scheduler.
             */
            wfEngine.bindSchedulerDatacenter(datacenter0.getId(), 0);
            CloudSim.startSimulation();
            List<Job> outputList0 = wfEngine.getJobsReceivedList();
            CloudSim.stopSimulation();
            printJobList(outputList0);
        } catch (Exception e) {
            Log.printLine("The simulation has been terminated due to an unexpected error");
            e.printStackTrace();
        }
    }

    protected static WorkflowDatacenter createDatacenter(String name) {

        // Here are the steps needed to create a PowerDatacenter:
        // 1. We need to create a list to store one or more
        //    Machines
        List<Host> hostList = new ArrayList<>();

        // 2. A Machine contains one or more PEs or CPUs/Cores. Therefore, should
        //    create a list to store these PEs before creating
        //    a Machine.
        for (int i = 1; i <= 20; i++) {
            List<Pe> peList1 = new ArrayList<>();
            int mips = 2000;
            // 3. Create PEs and add these into the list.
            //for a dual-core machine, a list of 2 PEs is required:
            peList1.add(new Pe(0, new PeProvisionerSimple(mips))); // need to store Pe id and MIPS Rating
            peList1.add(new Pe(1, new PeProvisionerSimple(mips)));

            int hostId = i - 1; // each host needs a unique id
            int ram = 2048; //host memory (MB)
            long storage = 1000000; //host storage
            int bw = 10000;
            hostList.add(
                    new Host(
                            hostId,
                            new RamProvisionerSimple(ram),
                            new BwProvisionerSimple(bw),
                            storage,
                            peList1,
                            new VmSchedulerTimeShared(peList1))); // This is our first machine
        }

        // 4. Create a DatacenterCharacteristics object that stores the
        //    properties of a data center: architecture, OS, list of
        //    Machines, allocation policy: time- or space-shared, time zone
        //    and its price (G$/Pe time unit).
        String arch = "x86";      // system architecture
        String os = "Linux";          // operating system
        String vmm = "Xen";
        double time_zone = 10.0;         // time zone this resource located
        double cost = 3.0;              // the cost of using processing in this resource
        double costPerMem = 0.05;		// the cost of using memory in this resource
        double costPerStorage = 0.1;	// the cost of using storage in this resource
        double costPerBw = 0.1;			// the cost of using bw in this resource
        LinkedList<Storage> storageList = new LinkedList<>();	//we are not adding SAN devices by now
        WorkflowDatacenter datacenter = null;

        DatacenterCharacteristics characteristics = new DatacenterCharacteristics(
                arch, os, vmm, hostList, time_zone, cost, costPerMem, costPerStorage, costPerBw);

        // 5. Finally, we need to create a storage object.
        /**
         * The bandwidth within a data center in MB/s.
         */
        int maxTransferRate = 15; // this number comes from the FutureGrid site; you can specify your own bandwidth

        try {
            // Here we set the bandwidth to be 15MB/s
            HarddriveStorage s1 = new HarddriveStorage(name, 1e12);
            s1.setMaxTransferRate(maxTransferRate);
            storageList.add(s1);
            datacenter = new WorkflowDatacenter(name, characteristics, new VmAllocationPolicySimple(hostList), storageList, 0);
        } catch (Exception e) {
            e.printStackTrace();
        }
        return datacenter;
    }

    /**
     * Prints the job objects
     *
     * @param list list of jobs
     */
    protected static void printJobList(List<Job> list) {
        String indent = "    ";
        Log.printLine();
        Log.printLine("========== OUTPUT ==========");
        Log.printLine("Job ID" + indent + "Task ID" + indent + "STATUS" + indent
                + "Data center ID" + indent + "VM ID" + indent + indent
                + "Time" + indent + "Start Time" + indent + "Finish Time" + indent + "Depth");
        DecimalFormat dft = new DecimalFormat("###.##");
        for (Job job : list) {
            Log.print(indent + job.getCloudletId() + indent + indent);
            if (job.getClassType() == ClassType.STAGE_IN.value) {
                Log.print("Stage-in");
            }
            for (Task task : job.getTaskList()) {
                Log.print(task.getCloudletId() + ",");
            }
            Log.print(indent);

            if (job.getCloudletStatus() == Cloudlet.SUCCESS) {
                Log.print("SUCCESS");
                Log.printLine(indent + indent + job.getResourceId() + indent + indent + indent + job.getVmId()
                        + indent + indent + indent + dft.format(job.getActualCPUTime())
                        + indent + indent + dft.format(job.getExecStartTime()) + indent + indent + indent
                        + dft.format(job.getFinishTime()) + indent + indent + indent + job.getDepth());
            } else if (job.getCloudletStatus() == Cloudlet.FAILED) {
                Log.print("FAILED");
                Log.printLine(indent + indent + job.getResourceId() + indent + indent + indent + job.getVmId()
                        + indent + indent + indent + dft.format(job.getActualCPUTime())
                        + indent + indent + dft.format(job.getExecStartTime()) + indent + indent + indent
                        + dft.format(job.getFinishTime()) + indent + indent + indent + job.getDepth());
            }
        }
    }
}
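
A note on the pairing chosen above: MINMIN is a dynamic scheduling algorithm, so the planning algorithm is set to INVALID to keep the planner from overriding the scheduler. The reverse pairing also works. Below is a minimal sketch, assuming the WorkflowSim 1.0 enums also provide PlanningAlgorithm.HEFT and SchedulingAlgorithm.STATIC (as the bundled planning examples do); these lines would replace the corresponding lines in main():

            /*
             * Sketch: let a planning algorithm (HEFT) compute the whole schedule up front.
             * The runtime scheduler is then STATIC, so it only follows the planner's
             * placement decisions instead of re-scheduling tasks itself.
             */
            Parameters.SchedulingAlgorithm sch_method = Parameters.SchedulingAlgorithm.STATIC;
            Parameters.PlanningAlgorithm pln_method = Parameters.PlanningAlgorithm.HEFT;
            // op, cp, vmNum and daxPath stay exactly as in the example above
            Parameters.init(vmNum, daxPath, null, null, op, cp, sch_method, pln_method, null, 0);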

package org.workflowsim.examples;

import java.io.File;
import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.LinkedList;
import java.util.List;
import org.cloudbus.cloudsim.Cloudlet;
import org.cloudbus.cloudsim.CloudletSchedulerSpaceShared;
import org.cloudbus.cloudsim.DatacenterCharacteristics;
import org.cloudbus.cloudsim.HarddriveStorage;
import org.cloudbus.cloudsim.Host;
import org.cloudbus.cloudsim.Log;
import org.cloudbus.cloudsim.Pe;
import org.cloudbus.cloudsim.Storage;
import org.cloudbus.cloudsim.VmAllocationPolicySimple;
import org.cloudbus.cloudsim.VmSchedulerTimeShared;
import org.cloudbus.cloudsim.core.CloudSim;
import org.cloudbus.cloudsim.provisioners.BwProvisionerSimple;
import org.cloudbus.cloudsim.provisioners.PeProvisionerSimple;
import org.cloudbus.cloudsim.provisioners.RamProvisionerSimple;
import org.workflowsim.CondorVM;
import org.workflowsim.Task;
import org.workflowsim.WorkflowDatacenter;
import org.workflowsim.Job;
import org.workflowsim.WorkflowEngine;
import org.workflowsim.WorkflowPlanner;
import org.workflowsim.utils.ClusteringParameters;
import org.workflowsim.utils.OverheadParameters;
import org.workflowsim.utils.Parameters;
import org.workflowsim.utils.ReplicaCatalog;
import org.workflowsim.utils.Parameters.ClassType;

/**
 * This WorkflowSimExample creates:
 *   a workflow planner,
 *   a workflow engine,
 *   one scheduler,
 *   one data center, and
 *   20 VMs.
 * You should at least change daxPath. You may change other parameters as well.
 */

public class WorkflowSimBasicExampleTest {

	protected static List<CondorVM> createVM(int userId, int vms) {
		// Create a container to store the VMs. This list is passed to the broker later.
		LinkedList<CondorVM> list = new LinkedList<>();

		// VM parameters
		long size = 10000; // image size (MB): the virtual disk allocated to the VM
		int ram = 512; // VM memory (MB)
		int mips = 1000; // MIPS (Million Instructions Per Second): a CPU performance rating
		long bw = 1000; // bandwidth, which determines task-to-task transfer cost
		int pesNumber = 1; // number of CPUs (PEs)
		String vmm = "Xen"; // VMM (Virtual Machine Manager) name

		// Create the VMs.
		CondorVM[] vm = new CondorVM[vms]; // CondorVM extends Vm
		for (int i = 0; i < vms; i++) {
			double ratio = 1.0;
			vm[i] = new CondorVM(i, userId, mips * ratio, pesNumber, ram, bw, size, vmm,
					new CloudletSchedulerSpaceShared());
			// CloudletSchedulerSpaceShared implements the scheduling policy inside the VM:
			// cloudlets are space-shared, so each PE runs one cloudlet at a time rather than all tasks at once.
			list.add(vm[i]);
		}
		return list;
	}

	// STATIC METHODS ///
	/**
	 * Creates main() to run this example. This example has only one datacenter and one storage.
	 */
	public static void main(String[] args) {
		try {
			// First step: initialize the WorkflowSim package.
			/**
			 * Note that the actual number of VMs created may be smaller than vmNum
			 * if the data center or its hosts do not have sufficient resources. Take care.
			 */
			int vmNum = 20; // number of VMs
			/**
			 * Change this to the actual physical path in your environment.
			 */
			// String daxPath ="/Users/weiweich/NetBeansProjects/WorkflowSim-1.0/config/dax/Montage_100.xml";
			String daxPath = "F:\\WorkFlowSim\\WorkflowSim-1.0-master\\config\\dax\\Montage_100.xml";

			File daxFile = new File(daxPath);
			if (!daxFile.exists()) {
				Log.printLine("Warning: Please replace daxPath with the physical path in your working environment!");
				return;
			}

			/**
			 * Since we use the MINMIN scheduling algorithm, the planning algorithm should be
			 * INVALID so that the planner does not override the scheduler's result.
			 */
			// enum constants, referenced directly
			Parameters.SchedulingAlgorithm sch_method = Parameters.SchedulingAlgorithm.MINMIN;
			Parameters.PlanningAlgorithm pln_method = Parameters.PlanningAlgorithm.INVALID;
			ReplicaCatalog.FileSystem file_system = ReplicaCatalog.FileSystem.SHARED;

			/**
			 * No overheads.
			 */
			OverheadParameters op = new OverheadParameters(0, null, null, null, null, 0);

			/**
			 * No clustering.
			 */
			ClusteringParameters.ClusteringMethod method = ClusteringParameters.ClusteringMethod.NONE;
			ClusteringParameters cp = new ClusteringParameters(0, 0, method, null);

			// Initialize the static parameters.
			Parameters.init(vmNum, daxPath, null, null, op, cp, sch_method, pln_method, null, 0);
			ReplicaCatalog.init(file_system);

			// These parameters must be set before creating any entities.
			int num_user = 1; // number of grid users
			Calendar calendar = Calendar.getInstance();
			boolean trace_flag = false; // whether to trace events

			// Initialize the CloudSim library.
			CloudSim.init(num_user, calendar, trace_flag);
			
			// Create the datacenter datacenter0.
			WorkflowDatacenter datacenter0 = createDatacenter("Datacenter_0");

			// Create a WorkflowPlanner with one scheduler.
			WorkflowPlanner wfPlanner = new WorkflowPlanner("planner_0", 1);
			
			// Create a WorkflowEngine.
			WorkflowEngine wfEngine = wfPlanner.getWorkflowEngine();
			
			// Create a list of VMs. The userId of a VM is basically the id of the scheduler that controls it.
			List<CondorVM> vmlist0 = createVM(wfEngine.getSchedulerId(0), Parameters.getVmNum());

			// Submit this list of VMs to the WorkflowEngine.
			wfEngine.submitVmList(vmlist0, 0);

			// Bind the datacenter to the scheduler.
			wfEngine.bindSchedulerDatacenter(datacenter0.getId(), 0);
			
			// Start the simulation.
			CloudSim.startSimulation();
			// Retrieve the list of jobs received by the engine.
			List<Job> outputList0 = wfEngine.getJobsReceivedList();
			// Stop the simulation.
			CloudSim.stopSimulation();
			// Print the simulation results.
			printJobList(outputList0);
		} catch (Exception e) {
			Log.printLine("The simulation has been terminated due to an unexpected error");
			e.printStackTrace();
		}
	}
	
	protected static WorkflowDatacenter createDatacenter(String name) { // Create the datacenter.

		// Here are the steps needed to create a PowerDatacenter:
		// 1. Create a list to store one or more machines (hosts).
		List<Host> hostList = new ArrayList<>();

		// 2. A machine contains one or more PEs (CPUs/cores), so a list storing these PEs
		//    must be created before creating the machine. A Pe represents a CPU unit with a
		//    status and a PeProvisioner allocation policy; all PEs on the same host have the
		//    same processing capacity.
		for (int i = 1; i <= 20; i++) {
			List<Pe> peList1 = new ArrayList<>();
			int mips = 2000;
			
			// 3. Create the PEs and add them to the list.
			// For a dual-core machine, a list of 2 PEs (Pe id, MIPS rating) is required:
			peList1.add(new Pe(0, new PeProvisionerSimple(mips))); // stores the Pe id and MIPS rating
			peList1.add(new Pe(1, new PeProvisionerSimple(mips)));
			
			// Declare the host parameters and add the host to the machine list.
			// Host parameters: hostId, memory, storage capacity, bandwidth.

			int hostId = i - 1; // each host needs a unique id
			int ram = 2048; // host memory (MB)
			long storage = 1000000; // host storage
			int bw = 10000;
			hostList.add(new Host(hostId, new RamProvisionerSimple(ram), new BwProvisionerSimple(bw), storage, peList1,
					new VmSchedulerTimeShared(peList1))); // the machine is created
		
		}

		/*
		 * 4. Create a DatacenterCharacteristics object that stores the properties of the
		 *    data center: architecture, OS, list of machines, allocation policy (time- or
		 *    space-shared), time zone, and its price (G$/Pe time unit).
		 */
		String arch = "x86"; // system architecture
		String os = "Linux"; // operating system
		String vmm = "Xen";
		double time_zone = 10.0; // time zone where this resource is located
		double cost = 3.0; // the cost of using processing in this resource
		double costPerMem = 0.05; // the cost of using memory in this resource
		double costPerStorage = 0.1; // the cost of using storage in this resource
		double costPerBw = 0.1; // the cost of using bandwidth in this resource
		LinkedList<Storage> storageList = new LinkedList<>(); // we are not adding SAN devices for now
		WorkflowDatacenter datacenter = null;

		DatacenterCharacteristics characteristics = new DatacenterCharacteristics(arch, os, vmm, hostList, time_zone,
				cost, costPerMem, costPerStorage, costPerBw);

		// 5. Finally, create the storage and the datacenter object.
		// The bandwidth within the data center, in MB/s.
		int maxTransferRate = 15; // this number comes from the FutureGrid site; you can specify your own bandwidth

		try {
			// Here we set the bandwidth to 15 MB/s.
			HarddriveStorage s1 = new HarddriveStorage(name, 1e12);
			s1.setMaxTransferRate(maxTransferRate);
			storageList.add(s1);
			datacenter = new WorkflowDatacenter(name, characteristics, new VmAllocationPolicySimple(hostList),
					storageList, 0);
		} catch (Exception e) {
			e.printStackTrace();
		}
		return datacenter;
	}

	/**
	 * Prints the job objects
	 *
	 * @param list list of jobs
	 */
	protected static void printJobList(List<Job> list) {
		String indent = "    ";
		Log.printLine();
		Log.printLine("========== OUTPUT ==========");
		Log.printLine("Job ID" + indent + "Task ID" + indent + "STATUS" + indent + "Data center ID" + indent + "VM ID"
				+ indent + indent+ "Time" + indent + "Start Time" + indent + "Finish Time" + indent + "Depth");
		DecimalFormat dft = new DecimalFormat("###.##");
		for (Job job : list) {
			Log.print(indent + job.getCloudletId() + indent + indent);
			if (job.getClassType() == ClassType.STAGE_IN.value) {
				Log.print("Stage-in");
			}
			for (Task task : job.getTaskList()) {
				Log.print(task.getCloudletId() + ",");
			}
			Log.print(indent);

			if (job.getCloudletStatus() == Cloudlet.SUCCESS) {
				Log.print("SUCCESS");
				Log.printLine(indent + indent + job.getResourceId() + indent + indent + indent + job.getVmId() + indent
						+ indent + indent + dft.format(job.getActualCPUTime()) + indent + indent
						+ dft.format(job.getExecStartTime()) + indent + indent + indent
						+ dft.format(job.getFinishTime()) + indent + indent + indent + job.getDepth());
			} else if (job.getCloudletStatus() == Cloudlet.FAILED) {
				Log.print("FAILED");
				Log.printLine(indent + indent + job.getResourceId() + indent + indent + indent + job.getVmId() + indent
						+ indent + indent + dft.format(job.getActualCPUTime()) + indent + indent
						+ dft.format(job.getExecStartTime()) + indent + indent + indent
						+ dft.format(job.getFinishTime()) + indent + indent + indent + job.getDepth());
			}
		}
	}
}
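
The printed table lists, for every job, its datacenter, VM, runtime, start time, finish time, and depth in the workflow. A common follow-up metric is the makespan, i.e. the latest finish time over all successful jobs. Here is a minimal sketch that derives it from the same outputList0, using only calls already shown above; it could be appended in main() after printJobList(outputList0):

			// Sketch: compute the workflow makespan from the received jobs.
			double makespan = 0.0;
			for (Job job : outputList0) {
				if (job.getCloudletStatus() == Cloudlet.SUCCESS) {
					makespan = Math.max(makespan, job.getFinishTime());
				}
			}
			Log.printLine("Makespan: " + new DecimalFormat("###.##").format(makespan));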
