如何解决如何通过Python中的apply_async传递gurobipy.Model变量?
摘要 我是python并行计算的新手。我使用Gurobi建立了DEA模型,并计算了每个DMU(决策制定单位)的效率。为了减少总的计算时间,我将模型分为两个步骤来解决:
- 第一步,定义了k个模型
- Step2,并行优化了k个模型。
Step1是正确的并且可执行。但是在步骤2中,当我通过Solve()
将对象参数“ gurobipy.Model”传递到定义的函数multiprocessing.Pool.apply_async()
中时,出现了TypeError: can't pickle PyCapsule objects
。并且函数Solve()
未执行。我该如何使用apply_async
函数来传递gurobipy.Model
变量,或者是否有任何其他并行方法来传递gurobipy.Model
变量?
详细信息 以下是主程序
from multiprocessing import Pool
import multiprocessing
from gurobipy import *
import gurobipy as gp
import numpy as np
import pandas as pd
import time
def runcomputationgurobi(Root,FileName,ResultFileName,numInput,numOutput):
'''
input:root path,file name,number of input unit,number of output unit
output:Excel file (including DMU number,best solution(efficiency),modeling time,solving time)
'''
#Data peprocessing
df = pd.read_csv(f"{Root}/{FileName}",header=None) #download data
D = np.array(df) #convert to ndarray
transD = D.transpose() #transpose ndarray
outputs = [] #empty list to store best solutions
scale,S = transD.shape #scale : numInput+numOutput;S : total number of DMUs
print("Build k models...")
#Step1: Modeling
'''
call BuildGurobiModel(list of download data,number of output unit)
return:k modeling times(list[float])、k Gurobi models(list[gurobipy.Model])
'''
build_time_house,model_house = BuildGurobiModels(transD,numOutput)
print("Parallel computing k models...")
#Step2: Parallel optimization model
'''
call Solve(kth Gurobi model)
return:k best solutions(efficiency)(float)、k solving times(float)
'''
temp = []
pool = multiprocessing.Pool(4)
print("Start parallel solve")
start_time = time.time()
for k in range(S):
temp.append([k+1,build_time_house[k],pool.apply_async(Solve,args=(model_house[k],))])
pool.close()
pool.join()
print(f"{time.time() - start_time}s")
for k,build_time,_return in temp:
outputs.append([k,_return.get()[0],_return.get()[1]]) #_return.get()=(obj_efficiency,solve_time,)
#Output Excel
pd.DataFrame(np.array(outputs)).to_excel(f"{Root}/result_parallel_matrix_ChgRHS.xlsx",header=["DMU","obj_efficiency","build_time","solve_time"],index=False)
if __name__=="__main__":
rootPath = "C:/Users/MB516/Documents/source/Python Scripts/Parallel_processing"
file_name = "test.csv"
resultfile_name = "result.csv"
numInput = 2
numOutput = 3
start_time = time.time()
runcomputationgurobi(rootPath,file_name,resultfile_name,numOutput)
parallel_solveTime = time.time() - start_time
print(f"solveTime:{parallel_solveTime}")
建立k个模型:
def BuildGurobiModels(transD,numOutput):
'''
input: list of download data(list),number of input unit(int),number of output unit(int)
return: k modeling times(list[float]),k Gurobi models(list[gurobipy.Model])
'''
#Data peprocessing
model_house = []
build_time_house = []
scale,S = transD.shape #scale : numInput+numOutput;S : total number of DMUs
for k in range(S):
#Define model
start_time = time.time()
model = gp.Model(f"NaiveDEA{k+1}")
model.setParam("OutputFlag",0) # 0: disables solver output
model.setParam("Method",0) # 0: primal simplex
#Define variables
#define lambda
lambdaarray = model.addVars(S,lb = 0.0,ub = GRB.INFINITY,vtype = GRB.CONTINUOUS)
#define theta
theta = model.addVar(lb = -GRB.INFINITY,vtype=GRB.CONTINUOUS,name="theta")
model.update()
#Set the objective
model.setobjective(theta,GRB.MINIMIZE)
#Define constraints
#input constraint
model.addConstrs((LinExpr(transD[i],lambdaarray.values()) <=transD[i,k]*theta for i in range(numInput)),name = "Input")
model.update()
#output constraint
model.addConstrs((LinExpr(transD[j],lambdaarray.values()) >=transD[j,k] for j in range(numInput,scale)),name = "Output")
model.update()
#convexity constraint
model.addConstr(quicksum(lambdaarray)==1,name="Convexity")
model.update()
build_time = time.time() - start_time #modeling time
model_house.append([model])
build_time_house.append([build_time])
return build_time_house,model_house
解决第k个模型:
def Solve(model):
'''
input: kth Gurobi model(gurobipy.Model)
return:k best solutions(efficiency)(float),k solving times(float)
'''
print("Start Solve!!!!!!")
#Solve
start_time = time.time()
model.optimize()
solve_time = time.time() - start_time
#print
objvalue = model.getobjective()
getobjv = objvalue.getValue()
Build k models...
Parallel computing k models...
0.53267502784729s
Traceback (most recent call last):
File "c:/Users/MB516/Documents/source/Python Scripts/Parallel_processing/ENGLIFH_TEST_ParaLLEL.py",line 124,in <module>
runcomputationgurobi(rootPath,numOutput)
File "c:/Users/MB516/Documents/source/Python Scripts/Parallel_processing/ENGLIFH_TEST_ParaLLEL.py",line 47,in runcomputationgurobi
outputs.append([k,)
TypeError: can't pickle PyCapsule objects
它没有执行步骤2的“求解”功能,因为它没有打印出“开始求解!!!!!”在功能Solve()
中。还有以下程序
for k,_return.get()[1]]) #_return.get()=(obj_efficiency,)
具有TypeError: can't pickle PyCapsule objects
。我怎么解决这个问题 ?预先感谢您的回答!
环境
解决方法
这是您可以在Python中并行创建和求解多个模型的方法:
import multiprocessing as mp
import gurobipy as gp
def solve_model(input_data):
with gp.Env() as env,gp.Model(env=env) as model:
# define model
model.optimize()
# retrieve data from model
if __name__ == '__main__':
with mp.Pool() as pool:
pool.map(solve_model,[input_data1,input_data2,input_data3]
有关更多信息,请参阅full guide。
版权声明:本文内容由互联网用户自发贡献,该文观点与技术仅代表作者本人。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如发现本站有涉嫌侵权/违法违规的内容, 请发送邮件至 dio@foxmail.com 举报,一经查实,本站将立刻删除。