A Python Algorithm for Level Medx of while True: learn() (Steam)

Introduction

This article builds a Python program that simulates the level and searches for the optimal strategy by exhaustive enumeration.

The strategy space of level Medx

Medx contains three decision trees and three output streams. The trees' color labels, together with the connections to the output streams, make up the strategy space.

import itertools as itl

colorScheme = list(itl.product('RGBA', repeat=6))

def group222(line):
    # Pack a flat 6-tuple of labels into three (up, down) pairs,
    # one pair per decision tree.
    scheme = []
    it = iter(line)
    for i in range(3):
        left, right = next(it), next(it)
        scheme.append((left, right))
    return scheme

colorScheme = [group222(s) for s in colorScheme]
colorScheme
>> 
[[('R', 'R'), ('R', 'R'), ('R', 'R')],
 [('R', 'R'), ('R', 'R'), ('R', 'G')],
 [('R', 'R'), ('R', 'R'), ('R', 'B')],
 [('R', 'R'), ('R', 'R'), ('R', 'A')],
 [('R', 'R'), ('R', 'R'), ('G', 'R')],
......

import pickle as pk

pk.dump(colorScheme, open('colorScheme.dat', 'wb'))

This generates the Cartesian product over R, G, B and A (where A means "any"). Each decision tree carries two labels, six positions in total, so there are 4**6 = 4096 color schemes. group222 packs each flat 6-tuple into three pairs, one per tree, for later use.
The link strategies are generated in the same way, as sketched below.
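
Assuming each of the four terminal exits connects to one of the three output streams (this matches the 4-tuples such as (0, 2, 2, 1) that the search later loads from linkScheme.dat), that code might look like:

import itertools as itl
import pickle as pk

# Each of the 4 terminal exits connects to one of the 3 output
# streams (indexed 0-2), giving 3**4 = 81 link strategies.
linkScheme = list(itl.product(range(3), repeat=4))

pk.dump(linkScheme, open('linkScheme.dat', 'wb'))

Combined with the 4096 color schemes, the full strategy space holds 4096 * 81 = 331,776 candidates, small enough to enumerate exhaustively.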

Designing the decision-tree and collector objects

The classes should mirror the level's computation graph while staying reasonably extensible.

from collections import Counter

class DT:
    # A decision-tree node with an "up" branch and a "down" branch.

    def __init__(self, unext=None, dnext=None):
        # Downstream receivers: another DT or a Collector.
        self.unext = unext
        self.dnext = dnext

    def config(self, ucolor, dcolor):
        # Color labels of the two branches; 'A' matches any color.
        self.ucolor = ucolor
        self.dcolor = dcolor

    def activate(self, package):
        color, amount = package

        umatch = color == self.ucolor or self.ucolor == 'A'
        dmatch = color == self.dcolor or self.dcolor == 'A'

        # If both branches match, or neither does, the flow splits
        # evenly; otherwise it all follows the matching branch.
        if umatch == dmatch:
            uamount = damount = amount / 2
        else:
            uamount = umatch * amount
            damount = dmatch * amount

        self.unext.activate((color, uamount))
        self.dnext.activate((color, damount))

class Collector(Counter):
    # Accumulates the amount of each color that reaches it.

    def __init__(self):
        super().__init__()
        self.update(dict(R=0, G=0, B=0))

    def activate(self, package):
        # Same signature as DT.activate, so a DT can feed either one.
        color, amount = package
        self[color] += amount

    def reset(self):
        self['R'] = 0
        self['G'] = 0
        self['B'] = 0

The activate method exists to give decision trees and collectors a uniform interface. Collector inherits from collections.Counter to get convenient merge operations (their usefulness shows up later).
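
A quick illustration of both points (a throwaway snippet, not part of the solver):

c = Collector()
c.activate(('R', 5))   # the same call a DT makes on its children
c.activate(('R', 3))

merged = Collector()
merged.update(c)       # Counter.update adds counts instead of replacing
merged.update(c)
merged['R']
>>
16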

Searching the strategy space for the minimum error

from node import DT, Collector  # the classes above, saved as node.py

import pickle as pk

import itertools as it

# Load the color space and the link space
colorScheme = pk.load(open('colorScheme.dat', 'rb'))
linkScheme = pk.load(open('linkScheme.dat', 'rb'))

# Lay out the decision-tree nodes
widgets = [DT() for i in range(3)]
# Lay out the collectors
collectors = [Collector() for i in range(4)]

# Wire up the computation graph
widgets[0].unext, widgets[0].dnext = widgets[1], widgets[2]
widgets[1].unext, widgets[1].dnext = collectors[0], collectors[1]
widgets[2].unext, widgets[2].dnext = collectors[2], collectors[3]

# Record each strategy's performance
exitsRecords = []

for colorS, linkS in it.product(colorScheme, linkScheme):
    # Search the Cartesian product of the color space and the link space

    # Configure the decision trees with this color strategy
    for i in range(3):
        ucolor, dcolor = colorS[i]
        widgets[i].config(ucolor, dcolor)

    # Set up the output streams
    exits = [Collector() for i in range(3)]

    # Feed in the inputs by activating the root node
    widgets[0].activate(('R', 36))
    widgets[0].activate(('G', 40))
    widgets[0].activate(('B', 26))

    # Merge the collectors into the streams per the link strategy
    for i, c in enumerate(collectors):
        exits[linkS[i]].update(c)
        c.reset()

    # Score this strategy: skip it outright if a forbidden color
    # reaches a stream, otherwise use squared error on the totals
    e0, e1, e2 = exits
    if e0['B'] != 0 or e1['R'] != 0 or e2['B'] != 0:
        continue

    diff0 = (e0['R'] + e0['G'] - 19) ** 2
    diff1 = (e1['G'] + e1['B'] - 16) ** 2
    diff2 = (e2['R'] + e2['G'] - 15) ** 2
    cost = diff0 + diff1 + diff2
    exitsRecords.append((cost, colorS, linkS, (e0, e1, e2)))

from operator import itemgetter

sorted(exitsRecords, key=itemgetter(0))[0]
>>
(914.0,
 [('R', 'B'), ('R', 'G'), ('R', 'B')],
 (0, 2, 2, 1),
 (Collector({'R': 36, 'G': 0.0, 'B': 0.0}),
  Collector({'R': 0, 'G': 10.0, 'B': 26}),
  Collector({'R': 0, 'G': 30.0, 'B': 0.0})))
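
A quick sanity check on the cost: exit 0 receives 36 blocks against a target of 19, exit 1 receives 36 against 16, and exit 2 receives 30 against 15, so the error is (36-19)**2 + (36-16)**2 + (30-15)**2 = 289 + 400 + 225 = 914, matching the reported value.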

The program's output is one of the optimal strategies, pictured below:

[figure: medx.png]

All optimal strategies for this level

Each record in the output should be read as follows:

  • the error value (914 is the minimum for this level)
  • the color scheme of the three decision trees, in left, top, right order
  • the output stream each of the 4 terminal exits connects to
  • the distribution of blocks each output stream receives
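
The full list can be reproduced by keeping every record that ties the minimum cost; a minimal sketch:

records = sorted(exitsRecords, key=itemgetter(0))
minCost = records[0][0]
[r for r in records if r[0] == minCost]
>>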
[(914.0,
  [('R', 'B'), ('R', 'G'), ('R', 'B')],
  (0, 2, 2, 1),
  (Collector({'R': 36, 'G': 0.0, 'B': 0.0}),
   Collector({'R': 0, 'G': 10.0, 'B': 26}),
   Collector({'R': 0, 'G': 30.0, 'B': 0.0}))),
 (914.0,
  [('R', 'B'), ('R', 'G'), ('G', 'A')],
  (0, 2, 2, 1),
  (Collector({'R': 36, 'G': 0.0, 'B': 0.0}),
   Collector({'R': 0, 'G': 10.0, 'B': 26}),
   Collector({'R': 0, 'G': 30.0, 'B': 0.0}))),
 (914.0,
  [('R', 'B'), ('R', 'G'), ('B', 'R')],
  (0, 2, 1, 2),
  (Collector({'R': 36, 'G': 0.0, 'B': 0.0}),
   Collector({'R': 0, 'G': 10.0, 'B': 26}),
   Collector({'R': 0, 'G': 30.0, 'B': 0.0}))),
 (914.0,
  [('R', 'B'), ('R', 'G'), ('A', 'G')],
  (0, 2, 1, 2),
  (Collector({'R': 36, 'G': 0.0, 'B': 0.0}),
   Collector({'R': 0, 'G': 10.0, 'B': 26}),
   Collector({'R': 0, 'G': 30.0, 'B': 0.0}))),
 (914.0,
  [('R', 'B'), ('G', 'R'), ('R', 'B')],
  (2, 0, 2, 1),
  (Collector({'R': 36, 'G': 0.0, 'B': 0.0}),
   Collector({'R': 0, 'G': 10.0, 'B': 26}),
   Collector({'R': 0, 'G': 30.0, 'B': 0.0}))),
 (914.0,
  [('R', 'B'), ('G', 'R'), ('G', 'A')],
  (2, 0, 2, 1),
  (Collector({'R': 36, 'G': 0.0, 'B': 0.0}),
   Collector({'R': 0, 'G': 10.0, 'B': 26}),
   Collector({'R': 0, 'G': 30.0, 'B': 0.0}))),
 (914.0,
  [('R', 'B'), ('G', 'R'), ('B', 'R')],
  (2, 0, 1, 2),
  (Collector({'R': 36, 'G': 0.0, 'B': 0.0}),
   Collector({'R': 0, 'G': 10.0, 'B': 26}),
   Collector({'R': 0, 'G': 30.0, 'B': 0.0}))),
 (914.0,
  [('R', 'B'), ('G', 'R'), ('A', 'G')],
  (2, 0, 1, 2),
  (Collector({'R': 36, 'G': 0.0, 'B': 0.0}),
   Collector({'R': 0, 'G': 10.0, 'B': 26}),
   Collector({'R': 0, 'G': 30.0, 'B': 0.0}))),
 (914.0,
  [('B', 'R'), ('R', 'B'), ('R', 'G')],
  (2, 1, 0, 2),
  (Collector({'R': 36, 'G': 0.0, 'B': 0.0}),
   Collector({'R': 0, 'G': 10.0, 'B': 26}),
   Collector({'R': 0, 'G': 30.0, 'B': 0.0}))),
 (914.0,
  [('B', 'R'), ('R', 'B'), ('G', 'R')],
  (2, 1, 2, 0),
  (Collector({'R': 36, 'G': 0.0, 'B': 0.0}),
   Collector({'R': 0, 'G': 10.0, 'B': 26}),
   Collector({'R': 0, 'G': 30.0, 'B': 0.0}))),
 (914.0,
  [('B', 'R'), ('G', 'A'), ('R', 'G')],
  (2, 1, 0, 2),
  (Collector({'R': 36, 'G': 0.0, 'B': 0.0}),
   Collector({'R': 0, 'G': 10.0, 'B': 26}),
   Collector({'R': 0, 'G': 30.0, 'B': 0.0}))),
 (914.0,
  [('B', 'R'), ('G', 'A'), ('G', 'R')],
  (2, 1, 2, 0),
  (Collector({'R': 36, 'G': 0.0, 'B': 0.0}),
   Collector({'R': 0, 'G': 10.0, 'B': 26}),
   Collector({'R': 0, 'G': 30.0, 'B': 0.0}))),
 (914.0,
  [('B', 'R'), ('B', 'R'), ('R', 'G')],
  (1, 2, 0, 2),
  (Collector({'R': 36, 'G': 0.0, 'B': 0.0}),
   Collector({'R': 0, 'G': 10.0, 'B': 26}),
   Collector({'R': 0, 'G': 30.0, 'B': 0.0}))),
 (914.0,
  [('B', 'R'), ('B', 'R'), ('G', 'R')],
  (1, 2, 2, 0),
  (Collector({'R': 36, 'G': 0.0, 'B': 0.0}),
   Collector({'R': 0, 'G': 10.0, 'B': 26}),
   Collector({'R': 0, 'G': 30.0, 'B': 0.0}))),
 (914.0,
  [('B', 'R'), ('A', 'G'), ('R', 'G')],
  (1, 2, 0, 2),
  (Collector({'R': 36, 'G': 0.0, 'B': 0.0}),
   Collector({'R': 0, 'G': 10.0, 'B': 26}),
   Collector({'R': 0, 'G': 30.0, 'B': 0.0}))),
 (914.0,
  [('B', 'R'), ('A', 'G'), ('G', 'R')],
  (1, 2, 2, 0),
  (Collector({'R': 36, 'G': 0.0, 'B': 0.0}),
   Collector({'R': 0, 'G': 10.0, 'B': 26}),
   Collector({'R': 0, 'G': 30.0, 'B': 0.0})))]
