# excel2xml.py
# -*- coding: cp936 -*-
import os , re
import copy
import codecs
import json
from xlrd import open_workbook #http://pypi.python.org/pypi/xlrd
from collections import OrderedDict
from xml2cpp import XmlToCpp
from xml2cshape import XmlToCShape
import excel2type
import types
import time
import sqlite3
import random
import random_table
from print_color import Color
import sys
reload(sys)
sys.setdefaultencoding("utf8")
# --- Module-level shared state for the whole conversion run. ---
# NOTE(review): "Golbal"/"filed" are kept misspelled because other modules
# may reference these names; renaming would break imports.
Golbal_Config={}              # parsed configuration; per depend-file {name: (low, high)} id ranges (see check_attribute_value)
main_key_table = {}           # "<file>.<key>" -> set of primary-key values already seen
primary_key_map = {}          # "<file>.<key>" -> human-readable description used in foreign-key error logs
NameMap = {}                  # enum field name -> {VALUE_NAME: number}; filled elsewhere — TODO confirm loader
continue_filed_table = {}     # per-sheet tracker for fields declared "continue_field" (must increase by 1)
no_cpp = 0                    # presumably a "skip C++ generation" flag; not read in this chunk — verify
log_string = ''               # accumulated warning text, appended by log()
config_name = "conf.ini"      # configuration file name; not read in this chunk — verify
print_color = Color()         # colored console output helper (print_red_text used by print_error)
# Every type tag a schema column may declare (matched upper-cased).
type_array = ["BOOL", "INT", "UINT", "FLOAT","STRING","INTARRAY","FLOATARRAY"
,"STRINGARRAY","ENUM","ENUMARRAY","DATE","TIME","DATETIME"]
# (two comments originally here were garbled by a source-encoding
# conversion and carried no recoverable text)
def log(str):
    """Append one line of warning text to the module-wide log buffer."""
    global log_string
    log_string += str + '\n'
def print_error(str , assert_flag = 1):
    """Print *str* in red; unless *assert_flag* is falsy, abort the whole run."""
    print_color.print_red_text(str)
    if not assert_flag:
        return
    sys.exit(1)
def my_text_(text, markers=('', '')):
    """Strip a translation wrapper from *text*.

    *markers* is a (prefix, suffix) pair; when *text* is enclosed by the
    pair, the enclosed payload is returned, otherwise *text* is returned
    unchanged.  Empty markers mean "no wrapper" and are a no-op.

    Bug fixed: the original bound ``v = ("")`` -- a plain empty string,
    not a 2-tuple (the marker text was apparently lost to an encoding
    conversion) -- so ``v[0]`` raised IndexError on every call.  The
    marker pair is now an explicit, overridable parameter.
    """
    prefix, suffix = markers
    if not prefix or not suffix:
        return text
    if not text.startswith(prefix) or not text.endswith(suffix):
        return text
    return text[len(prefix):-len(suffix)]
def exist_chinese(text):
    """Return a match object if *text* contains any CJK character (U+4E00-U+9FA5), else None."""
    return re.search(u'[\u4e00-\u9fa5]+', text)
def valid_date(text):
    """Match object when *text* starts with a YYYY/M/D date, else None (end is unanchored)."""
    return re.match(u'^\d{4}\/\d{1,2}\/\d{1,2}', text)
def valid_time(text):
    """Match object when *text* is exactly H:M:S (1-2 digits each), else None."""
    return re.match(u'^\d{1,2}\:\d{1,2}\:\d{1,2}$', text)
def convert_data(text):
    """Convert a 'YYYY/M/D' date string to a local-time Unix timestamp (int seconds)."""
    parsed = time.strptime(text, "%Y/%m/%d")
    return int(time.mktime(parsed))
def convert_time(text):
    """Convert "H[:M[:S]]" into seconds.

    Missing trailing components count as zero: "1:2:3" -> 3723,
    "1:30" -> 5400, "2" -> 7200.

    Fix: removed a leftover debug ``print`` that spammed stdout on every
    converted cell.
    """
    time_array = text.split(':')
    if 0 == len(time_array):  # defensive: str.split never returns [], kept as-is
        print_error("time is empty")
    v = int(time_array[0])
    i = 1
    while i < len(time_array):
        v = v * 60 + int(time_array[i])
        i = i + 1
    # Pad the value out to a full hours/minutes/seconds triple.
    while i < 3:
        v *= 60
        i = i + 1
    return v
def convert_encode(data , native):
    """Normalize a raw cell value read from xlrd.

    Unicode cells become stripped UTF-8 byte strings.  Float cells are
    rounded to int unless *native* is truthy and the value has a real
    fractional part.  Everything else passes through untouched.
    """
    if type(data) is types.UnicodeType:
        return data.encode("utf-8").strip()
    if type(data) is types.FloatType:
        if not native or data == int(data):
            return int(round(data , 0))
    return data
def get_table_key(schema):
    """Return the sheet's primary-key column name.

    Prefers the non-empty layer-0 key declared in schema["key"]; falls
    back to the first field listed in schema["define"], or '' when none
    exists.
    """
    key_map = schema["key"]
    if "0" in key_map and len(key_map["0"]) > 0:
        return key_map["0"]
    for name in schema["define"]:
        return name
    return ''
def create_table_sql(data , schema, table_name, field_name_map):
    """Build a CREATE TABLE statement for *table_name* from one sample row.

    *data* is a representative entity (upper-cased field -> value) whose
    Python value types decide the SQLite column types; *field_name_map*
    maps those field names back to the original spreadsheet headers used
    as column names.  The schema's primary key column gets NOT NULL and
    becomes the PRIMARY KEY.

    NOTE(review): identifiers are spliced in with %-formatting, not
    escaped -- acceptable for trusted spreadsheet input only.
    """
    table_key = get_table_key(schema)
    create_sql = "CREATE TABLE `%s`(" % table_name
    for key , value in data.items():
        value_type = "TEXT(100)"
        if key == "xml_child":
            # Nested child rows are not materialized as columns.
            continue
        elif type(value) is types.IntType:
            value_type = "INTEGER"
        elif type(value) is types.UnicodeType or type(value) is types.StringType:
            value_type = "TEXT(100)"
        elif type(value) is types.FloatType:
            value_type = "REAL"
        else:
            print "create_table_sql:the type is not deal: type = %s" % type(value)
        create_sql += " `%s` %s" % (field_name_map[key], value_type)
        if key == table_key:
            create_sql += " NOT NULL"
        create_sql += ','
    create_sql += " PRIMARY KEY (`%s`))" % table_key
    return create_sql
def update_sqllite(database_name, table_name, create_sql , entities):
    """(Re)create *table_name* in the SQLite file and bulk-insert *entities*.

    Entities carrying a nested "xml_child" list are skipped, matching the
    original behaviour of persisting only flat records.

    Fixes:
      * values are bound as parameters instead of being %-formatted into
        the SQL text (strings containing a double quote used to produce
        broken statements, and "%f" truncated floats to 6 decimals);
      * the connection is closed even when an execute raises.
    """
    connect = sqlite3.connect(database_name)
    try:
        connect.execute("drop table if exists %s" % table_name)
        connect.execute(create_sql)
        for entity in entities:
            if "xml_child" in entity:
                continue
            columns = list(entity.keys())
            placeholders = ",".join(["?"] * len(columns))
            insert_sql = "insert into %s(%s) values(%s);" % (
                table_name, ",".join(columns), placeholders)
            connect.execute(insert_sql, [entity[name] for name in columns])
        connect.commit()
    finally:
        connect.close()
def assembly_item(items, field_type_map):
    """Render (key, value) pairs as XML attribute text ``key = "value" ``.

    DATE/TIME/DATETIME values are converted to integer timestamps /
    second counts first; STRING values un-escape ``\\\"`` back to a bare
    quote.  *items* is an iterable of pairs (usually dict.items()).

    NOTE(review): indentation reconstructed from a flattened source --
    the attribute is assumed to be emitted for every pair, even ones
    without an entry in *field_type_map*; confirm against the original.
    """
    temp = ''
    #print items
    for key , value in items:
        if key in field_type_map:
            if field_type_map[key] == "DATE":
                value = convert_data(value)
            elif field_type_map[key] == "TIME":
                value = convert_time(value)
            elif field_type_map[key] == "DATETIME":
                # "date time" -> date seconds plus optional time-of-day seconds.
                item_array = value.split();
                value = 0;
                array_length = len(item_array)
                if array_length > 0:
                    value = convert_data(item_array[0].strip())
                if array_length > 1:
                    value += convert_time(item_array[1].strip())
            elif field_type_map[key] == "STRING":
                value = value.replace('\\\"','"')
        temp += u'%s = "%s" ' % (key , value)
    return temp
def assembly_xml(entities , node_name_array , layer_index, field_type_map):
    """Recursively render *entities* (a list of attribute dicts) as XML.

    The element name for each nesting depth comes from *node_name_array*
    (falling back to "item").  An entity holding an "xml_child" list is
    emitted as an open tag with its children rendered one level deeper;
    the "xml_child" entry is removed from the entity in the process.

    Fix: the closing tag was emitted as ``name>`` -- the ``</`` was
    missing (apparently stripped by the same mangling that emptied other
    literals in this file), producing malformed XML.  It now emits
    ``</name>``.
    """
    content = ''
    black = ' '
    node_name = 'item'
    if layer_index < len(node_name_array):
        node_name = node_name_array[layer_index]
    # One extra space of indentation per nesting level.
    for i in range(layer_index):
        black += ' '
    for entity in entities:
        if "xml_child" not in entity:
            content += black
            content += u'<%s ' % node_name
            content += assembly_item(entity.items(), field_type_map)
            content += u' />\n'
        else:
            content += black
            xml_child = entity["xml_child"]
            del entity["xml_child"]
            content += u'<%s ' % node_name
            content += assembly_item(entity.items(), field_type_map)
            content += u' >\n'
            content += assembly_xml(xml_child , node_name_array , layer_index + 1, field_type_map)
            content += u'</%s>\n' % node_name
    return content
def write_to_xml_file(write_file,entities,node_name_array, field_type_map):
file_name = "Xml" + os.sep + write_file
f = codecs.open(file_name, "w", "utf-8")
f.write(u'\n')
f.write(u'<%s>\n' % "root")
#print field_type_map
content = assembly_xml(entities , node_name_array , 0, field_type_map)
f.write(content);
f.write(u'%s>' % "root");
f.close()
print "create xml file : %s " % file_name
def is_array(separator_array, string_var):
    """True when *string_var* contains any separator from *separator_array*."""
    for sep in separator_array:
        if sep in string_var:
            return True
    return False
def assembly_single_lua_value(key, value, field_type_map):
    """Render *value* as a Lua literal, most branches with a trailing newline.

    Parallels assembly_lua_value: array types become ``{...}`` tables,
    BOOL becomes true/false, STRING is quoted, scalars pass through.
    (The STRINGARRAY branch returns no trailing newline -- kept as-is.)

    Fix: the STRINGARRAY loop appended the *whole* raw value once per
    element (``value`` instead of ``item``), e.g. "a|b" rendered as
    ``{"a|b","a|b",}`` instead of ``{"a","b",}``.
    """
    field_type = field_type_map[key.upper()]
    if field_type in ["INTARRAY", "FLOATARRAY"]:
        if type(value) is types.UnicodeType or type(value) is types.StringType:
            value = value.replace('|',',')
        return "{%s} \n" %(value)
    elif "STRINGARRAY" == field_type :
        value_array = value.split('|')
        content = "{"
        for item in value_array:
            content += '"%s",' %(item)
        content += "}"
        return content
    elif "BOOL" == field_type:
        if 0 == value:
            return 'false\n'
        else:
            return 'true\n'
    elif "STRING" == field_type:
        return '"%s" \n' %(value)
    else:
        return "%s \n" %(value)
def assembly_lua_value(key, value, field_type_map):
    """Render *value* as an inline Lua literal (no trailing newline).

    INT/FLOAT arrays become ``{a,b}`` with '|' replaced by ',', string
    arrays become ``{"a","b",}``, BOOL maps to true/false, STRING is
    double-quoted, and anything else is formatted as-is.
    """
    kind = field_type_map[key.upper()]
    if kind in ["INTARRAY", "FLOATARRAY"]:
        if type(value) is types.UnicodeType or type(value) is types.StringType:
            value = value.replace('|',',')
        return "{%s}" %(value)
    if "STRINGARRAY" == kind:
        if type(value) is types.UnicodeType or type(value) is types.StringType:
            parts = value.split('|')
        else:
            parts = [value]
        rendered = "{"
        for piece in parts:
            rendered += '"%s",' %(piece)
        return rendered + "}"
    if "BOOL" == kind:
        return 'false' if 0 == value else 'true'
    if "STRING" == kind:
        return '"%s"' %(value)
    return "%s" %(value)
def assembly_lua(entities , field_name_map ,main_key, single, default_table, schema, field_type_map):
    """Render *entities* as Lua ``config_table[...] = {...}`` assignments.

    Each entity is copied onto a deep copy of *default_table*; values from
    nested "xml_child" rows are appended to list-valued defaults.  The
    table index is the main-key value (quoted when the key is a STRING)
    or, for *single* sheets, no index at all.  "class" fields are renamed
    to "type" (Lua/C++ keyword clash -- TODO confirm).

    NOTE(review): indentation reconstructed from a flattened source.
    """
    content = ''
    black = ' '
    index = 1
    for entity in entities:
        table_key = "%d" % index
        data_table = copy.deepcopy(default_table)
        for key , value in entity.items():
            if "class" == key:
                key = "type"
            if key == "xml_child":
                # Child rows contribute element-wise to list-valued fields.
                for child_entity in entity["xml_child"]:
                    for child_key , child_value in child_entity.items():
                        data_table[field_name_map[child_key]].append(child_value)
            elif key == main_key:
                table_key = str(value)
            else:
                data_table[field_name_map[key]] = value
        key_format = ''
        if not single:
            if len(main_key) > 0 and "STRING" == field_type_map[main_key]:
                key_format = '["%s"]' % table_key
            else:
                key_format = '[%s]' % table_key
        if 1 == len(data_table):
            # Single-field layout: assign the value directly.
            item_content = "config_table%s = " % key_format
            for key, value in data_table.items():
                if type(value) is types.ListType :
                    array_content = ''
                    for item_value in value:
                        array_content += assembly_lua_value(key, item_value, field_type_map)+','
                    item_content += "{%s} \n" %(array_content)
                else:
                    item_content += assembly_lua_value(key, value, field_type_map)
            item_content += '\n'
        else:
            # Multi-field layout: one Lua table per entity.
            item_content = "config_table%s = {\n" % key_format
            for key, value in data_table.items():
                key_type = field_type_map[key.upper()]
                if type(value) is types.ListType :
                    array_content = ''
                    for item_value in value:
                        array_content += assembly_lua_value(key, item_value, field_type_map)+','
                    item_content += "\t%s = {%s},\n" %(key,array_content)
                elif key_type in ["INTARRAY", "FLOATARRAY"]:
                    if type(value) is types.UnicodeType or type(value) is types.StringType:
                        value = value.replace('|',',')
                    item_content += "\t%s = {%s},\n" %(key,value)
                elif "STRINGARRAY" == key_type :
                    if type(value) is types.UnicodeType or type(value) is types.StringType:
                        value_array = value.split('|')
                        item_content += "\t%s ={" %(key)
                        for item in value_array:
                            item_content += '"%s",' %(item)
                        item_content += "},\n"
                    else:
                        item_content += '\t%s = {"%d"},\n' %(key,value)
                elif "BOOL" == key_type:
                    if 0 == value:
                        item_content += '\t%s = false,\n' %(key)
                    else:
                        item_content += '\t%s = true,\n' %(key)
                elif "STRING" == key_type or "DATE" == key_type or "TIME" == key_type or "DATETIME" == key_type :
                    item_content += '\t%s = "%s",\n' %(key,value)
                else:
                    item_content += "\t%s = %s,\n" %(key,value)
            item_content += "}\n\n"
        content += item_content
        index = index + 1
    return content
def write_to_lua_file(write_file, entities, field_name_map, cpp_schema, schema, single, field_type_map):
    """Write *entities* to Lua/<write_file>.lua as a ``config_table`` module.

    A default-value table is derived from the declared field types (0 /
    0.0 / '' / []); layer-1 fields (schema["define"][key] != 0) always
    default to a list so nested child rows can be appended.  The main key
    field itself is excluded -- it becomes the table index instead.
    """
    file_name = "Lua" + os.sep + write_file + '.lua'
    f = codecs.open(file_name, "w", "utf-8")
    f.write(u'local config_table = {}\n\n')
    main_key = ''
    if "0" in schema["key"]:
        main_key = schema["key"]["0"]
    default_table = {}
    for key , value in field_type_map.items():
        if key == main_key:
            continue
        #print schema
        if 0 == schema["define"][key]:
            if "BOOL" == value or "INT" == value or "ENUM" == value or "ENUMARRAY" == value:
                default_table[field_name_map[key]] = 0
            elif "FLOAT" == value :
                default_table[field_name_map[key]] = 0.0
            elif "STRING" == value or "DATE" == value or "TIME" == value or "DATETIME" == value:
                default_table[field_name_map[key]] = ''
            else:
                default_table[field_name_map[key]] = []
        else:
            default_table[field_name_map[key]] = []
    content = assembly_lua(entities , field_name_map , main_key, single, default_table, schema, field_type_map)
    f.write(content);
    f.write(u'\n\nreturn config_table');
    f.close()
    print "create Lua file : %s " % file_name
def get_container(container , layer_index):
    """Descend *layer_index* levels, each step following the LAST element's "xml_child" list."""
    current = container
    while layer_index > 0:
        current = current[-1]["xml_child"]
        layer_index -= 1
    return current
def add_cpp_schema(src , dest , schema):
    """Merge sample values from the entity list *src* into schema dict *dest*.

    Used to infer generated-code field types from real data.  A later
    value replaces an earlier one only when it is "more informative":
    a negative int (forces signed), a string containing a configured
    separator (forces array), or a float overriding an int.  Nested
    "xml_child" lists are merged recursively under dest["xml_child"].
    """
    for entity in src:
        for key, value in entity.items():
            if key == "xml_child":
                if "xml_child" not in dest:
                    dest["xml_child"] = OrderedDict()
                add_cpp_schema(entity["xml_child"] , dest["xml_child"] , schema)
            elif key not in dest:
                dest[key] = value
            elif type(dest[key] ) is types.IntType and value < 0:
                dest[key] = value
            elif type(value) is types.UnicodeType or type(value) is types.StringType:
                if is_array(schema["separator"] , value):
                    dest[key] = value
            elif type(value) is types.FloatType and type(dest[key] ) is types.IntType:
                dest[key] = value
def get_type_value(field_type, separator):
    """Return a placeholder sample value characteristic of *field_type*.

    The samples are shaped so downstream type inference picks the right
    generated type: -1 for INT (signed), 0.1 for FLOAT, "a" for STRING,
    and two-element separator-joined strings for the array types.

    Fix: the fallthrough called ``print_error("...%s..." , field_type)``
    -- the type name was passed as the *assert_flag* argument instead of
    being formatted into the message, so the %s was never filled in.
    """
    if ("BOOL" == field_type or "UINT" == field_type or "ENUM" == field_type or "ENUMARRAY" == field_type
            or "DATE" == field_type or "TIME" == field_type or "DATETIME" == field_type):
        return 0
    elif "INT" == field_type :
        return -1
    elif "FLOAT" == field_type:
        return 0.1
    elif "STRING" == field_type:
        return "a"
    elif "INTARRAY" == field_type:
        return ("0%s0" % separator)
    elif "FLOATARRAY" == field_type:
        return ("0.1%s0.1" % separator)
    elif "STRINGARRAY" == field_type:
        return ("a%sa" % separator)
    else:
        print_error("the type %s is not deal" % field_type)
def define_cpp_schema(dest, schema, field_type_map):
    """Seed *dest* with a type-characteristic sample value for every declared
    field; layer-1 fields go under dest["xml_child"] instead."""
    separator = schema["separator"][0]
    for key, layer in schema["define"].items():
        sample = get_type_value(field_type_map[key], separator)
        if layer == 1:
            dest.setdefault("xml_child", OrderedDict())[key] = sample
        else:
            dest[key] = sample
def define_init_type(schema_define , dest , layer_index):
    """Seed *dest* with a zero for every field declared at *layer_index*,
    recursing into dest["xml_child"] while deeper layers remain."""
    has_deeper = any(layer > layer_index for layer in schema_define.values())
    for name, layer in schema_define.items():
        if layer == layer_index and name not in dest:
            dest[name] = 0
    if has_deeper:
        if "xml_child" not in dest:
            dest["xml_child"] = OrderedDict()
        define_init_type(schema_define , dest["xml_child"] , layer_index + 1)
def get_cpp_schema(entities, schema, field_type_map, data_type = False):
    """Return (single, cpp_schema).

    *single* is 0 when the sheet holds multiple rows or is flagged as an
    array.  The schema dict is inferred from real data when *data_type*
    is truthy, otherwise synthesized from the declared field types.
    """
    single = 0 if (len(entities) > 1 or "array_flag" in schema) else 1
    cpp_schema = OrderedDict()
    if data_type:
        add_cpp_schema(entities, cpp_schema, schema)
    else:
        define_cpp_schema(cpp_schema, schema, field_type_map)
    return single, cpp_schema
def convert_type(data , data_type):
    """Cast *data* according to its schema type tag; unknown tags pass through unchanged."""
    casters = {"BOOL": int, "INT": int, "FLOAT": float, "STRING": str}
    caster = casters.get(data_type)
    if caster is not None:
        return caster(data)
    return data
def get_attribute_value(schema, attribute_name, data, data_type):
    """Normalize one raw cell value according to its declared *data_type*.

    Enum fields (attribute names present in the global NameMap) are
    translated from symbolic names to numbers; '|'-separated ENUMARRAY
    values are summed (flag/bitmask style -- TODO confirm intent).
    Plain array types are split on the first matching separator,
    element-wise cast, and re-joined; scalars are cast directly.

    NOTE(review): indentation reconstructed from a flattened source; the
    inner else below is a pass-through for array types either way.
    """
    global NameMap
    if type(data) is types.UnicodeType or type(data) is types.StringType:
        data = data.strip()
    name_map = attribute_name
    if name_map in NameMap:
        value_array = []
        if "ENUMARRAY" == data_type:
            value_array = data.split('|')
        else:
            value_array.append(data)
        data = 0
        for v in value_array:
            temp = v.upper()
            if temp in NameMap[name_map]:
                # Accumulate: multiple enum names add together.
                data = data + convert_encode(NameMap[name_map][temp] , False)
            else:
                print_error("attribute name %s :the %s of the attibute value is not in Name map" % (attribute_name , v))
    else:
        if data_type in ["INTARRAY","FLOATARRAY","STRINGARRAY"]:
            # Pick the first configured separator actually present.
            separator = ','
            for x in schema['separator']:
                if data.find(x) > 0:
                    separator = x
                    break
            temp_array = data.split(separator)
            data = ''
            for i in range(len(temp_array)):
                temp = convert_type(temp_array[i], data_type)
                data += str(temp)
                if i < len(temp_array)-1:
                    data += separator
                else:
                    # NOTE(review): convert_type leaves array types
                    # untouched, so this is effectively a no-op.
                    data = convert_type(data, data_type)
        else:
            data = convert_type(data, data_type)
    return data
def is_numeric(s):
    """True when every character of *s* is a digit, '.', or '-'.

    Note: the empty string counts as numeric, matching the original
    all()-based check.
    """
    for ch in s:
        if ch not in "0123456789.-":
            return False
    return True
'''
def split(value , filter):
value_set = set()
for separator in filter:
index = value.find(separator)
if index >=0:
temp = value[0:index]
if is_numeric(temp):
value_set.add(int(temp))
else:
print "the value is not numeric type ! temp = %d , value " % (temp, value)
sys.exit(1)
return value_set
'''
def split(value , filter):
    """Split numeric *value* on the first separator from *filter* found in it
    and return the fragments as a set of ints.

    Blank fragments are skipped; a value containing no separator
    contributes itself when numeric, otherwise the result is empty.

    Fixes: the non-numeric error branch referenced an undefined name
    ``temp`` and used a single ``%d`` placeholder for two arguments, so
    any bad fragment crashed with NameError/TypeError instead of
    reporting the offending value.
    """
    value = value.strip()
    value_set = set()
    for separator in filter:
        if value.find(separator) >= 0:
            for v in value.split(separator):
                v = v.strip()
                if 0 == len(v):
                    continue
                if is_numeric(v):
                    value_set.add(int(v))
                else:
                    print_error("the value is not numeric type ! fragment = %s , value = %s" % (v, value))
            return value_set
    if is_numeric(value):
        value_set.add(int(value))
    return value_set
def check_continue_field(attribute_name , attribute_value , schema , index):
    """Warn (via log) when a field declared in schema["continue_field"]
    does not increase by exactly 1 from the previously seen value.

    String values are ignored.  The last accepted value per field lives
    in the global continue_filed_table, which the sheet converters reset
    between sheets.

    NOTE(review): on a mismatch the tracker is NOT advanced, so a single
    gap makes every following row report as well -- confirm intent.
    """
    if type(attribute_value) is types.UnicodeType or type(attribute_value) is types.StringType:
        return
    global continue_filed_table
    if attribute_name not in schema["continue_field"]:
        return
    if attribute_name not in continue_filed_table:
        continue_filed_table[attribute_name] = attribute_value
    else:
        if attribute_value != continue_filed_table[attribute_name] + 1:
            log(" the field %s is not continue previous value = %d current value %d index = %d the sheet %s of file %s " \
                % (attribute_name , continue_filed_table[attribute_name] , attribute_value , index , schema["sheet_name"] , schema["xls_name"]))
        else:
            continue_filed_table[attribute_name] = attribute_value
def check_attribute_value(attribute_name , attribute_value , main_key_name , main_key_index_name , schema , index):
    """Validate one cell for primary-key uniqueness and foreign-key reachability.

    Returns False when the value must be rejected (duplicate main key or
    unresolvable foreign key), True otherwise.  A duplicate main key
    aborts the run via print_error unless the schema carries "no_assert",
    in which case it is only logged.  Foreign keys may be indirected
    through Golbal_Config id ranges: a matching range either accepts the
    value outright (key without '.') or redirects the lookup to another
    file's key set.

    NOTE(review): indentation reconstructed from a flattened source.
    """
    global Golbal_Config
    global main_key_table
    global primary_key_map
    if attribute_name == main_key_name:
        if attribute_value in main_key_table[main_key_index_name]:
            if "no_assert" not in schema:
                # Fatal by default: print_error exits the process here.
                if type(attribute_value) is types.UnicodeType or type(attribute_value) is types.StringType:
                    print_error(" main key is repeated ! index = %d value = %s , file name = %s.ini " % (index , attribute_value, schema["file_name"]))
                else:
                    print_error(" main key is repeated ! index = %d value = %d , file name = %s.ini " % (index , attribute_value, schema["file_name"]))
            else:
                if type(attribute_value) is types.UnicodeType or type(attribute_value) is types.StringType:
                    log(" main key is repeated ! index = %d value = %s , file name = %s.ini " % (index , attribute_value, schema["file_name"]))
                else:
                    log(" main key is repeated ! index = %d value = %d , file name = %s.ini " % (index , attribute_value, schema["file_name"]))
            return False
        # Record the key; numeric strings are normalized to int.
        if (type(attribute_value) is types.UnicodeType or type(attribute_value) is types.StringType) and is_numeric(attribute_value):
            main_key_table[main_key_index_name].add(int(attribute_value))
        else:
            main_key_table[main_key_index_name].add(attribute_value)
    if attribute_name in schema["foreign_key"]:
        depend_file = schema["foreign_key"][attribute_name].lower()
        attribute_value_set = set()
        if type(attribute_value) is types.UnicodeType or type(attribute_value) is types.StringType:
            attribute_value_set = split(attribute_value , schema["separator"])
        else:
            attribute_value_set.add(attribute_value)
        for attr_value in attribute_value_set:
            find_depend = False
            if depend_file in Golbal_Config:
                # Route the value through configured id ranges.
                for key , value in Golbal_Config[depend_file].items():
                    if value[0] <= attr_value and attr_value < value[1]:
                        if key.find('.') < 0:
                            return True
                        depend_file = key.lower()
                        find_depend = True
                        break
                if not find_depend:
                    log(" the %s %s sheet %s field = %d is beyond the scope ! " \
                        % (schema["xls_name"] , schema["sheet_name"] , attribute_name , attr_value))
                    return False
            if depend_file not in main_key_table:
                log(" the foreign key %s %s sheet %s field = %d is not find ! depend file %s is not parse" \
                    % (schema["xls_name"] , schema["sheet_name"] , attribute_name , attr_value, depend_file))
                return False
            if attr_value not in main_key_table[depend_file]:
                log(" the foreign key %s %s sheet %s field = %d is not find ! depend on the primary key is %s" \
                    % (schema["xls_name"] , schema["sheet_name"] , attribute_name , attr_value, primary_key_map[depend_file]))
                return False
    return True
def string_split(value , filter):
    """Like split(), but preserves order and duplicates by returning a LIST of ints.

    Fixes: the accumulator was a list but was filled with ``.add`` --
    an AttributeError the first time any value was collected; the error
    branch also referenced an undefined ``temp`` with a mismatched
    ``%d`` format string.
    """
    value_list = []
    for separator in filter:
        if value.find(separator) >= 0:
            for v in value.split(separator):
                if is_numeric(v):
                    value_list.append(int(v))
                else:
                    print_error("the value is not numeric type ! fragment = %s , value = %s" % (v, value))
            return value_list
    if is_numeric(value):
        value_list.append(int(value))
    return value_list
def check_value_type(attribute_name, data, define_type, separator_array):
    """Check that the raw cell *data* is compatible with its declared type.

    Only unicode cells are pattern-checked (numbers arrive from xlrd as
    floats); a numeric cell is accepted for everything except the
    date-like types.  Returns a truthy value (bool or regex match) when
    the cell is acceptable.  *separator_array* is currently unused here.
    """
    global NameMap
    if type(data) is types.UnicodeType:
        if "ENUM" == define_type or "ENUMARRAY" == define_type:
            # Enum text is valid iff the field has a registered name map.
            return (attribute_name in NameMap)
        elif define_type not in ["STRING","STRINGARRAY"]:
            if define_type in ["INTARRAY", "FLOATARRAY"]:
                return all(c in "0123456789.-|" for c in data)
            elif "DATE" == define_type:
                date_pattern = re.compile(u'^\d{4}\/\d{1,2}\/\d{1,2}$')
                result = date_pattern.match(data)
                return result
            elif "TIME" == define_type:
                time_pattern = re.compile(u'^\d{1,2}\:\d{1,2}\:\d{1,2}$')
                result = time_pattern.match(data)
                return result
            elif "DATETIME" == define_type:
                time_pattern = re.compile(u'^\d{4}\/\d{1,2}\/\d{1,2}\s+\d{1,2}\:\d{1,2}\:\d{1,2}')
                result = time_pattern.match(data)
                return result
            else:
                return all(c in "0123456789.-" for c in data)
    else:
        if define_type in ["DATE", "TIME" , "DATETIME"]:
            # Date-like types must be text, never raw numbers.
            return False;
    return True
def get_var_name(var):
    """Convert a CamelCase header such as "MaxHp" to snake_case ("max_hp").

    An underscore is inserted before an upper-case letter that borders a
    lower-case letter on either side, so runs of capitals like "ID" stay
    together ("HPMax" -> "hp_max").  The result is fully lower-cased.

    Cleanup: removed a no-op self-assignment (``convert_name =
    convert_name``) and the unused ``i = 0`` / ``last_upper``
    initialisations from the original.
    """
    convert_name = ''
    for i in range(len(var)):
        if var[i].isupper():
            if i > 0 and i + 1 < len(var) and (var[i+1].islower() or var[i-1].islower()):
                if '_' != var[i-1]:
                    convert_name += '_'
                convert_name += var[i].lower()
                continue
        convert_name += var[i].lower()
    return convert_name
def sheet_to_xml_by_schema_col(sheet , schema):
    """Convert a column-oriented sheet (one record per COLUMN) to outputs.

    Field names live in the column at schema["colindex_key"], type tags
    at schema["colindex_type"], and data columns start at
    schema["colindex_start"].  After validation the entities are fed to
    the C++/C# generators, the Lua writer and the XML writer.  Sheets
    with "rand_seed" instead synthesize entities via random_table.

    NOTE(review): indentation reconstructed from a flattened source.
    """
    global NameMap
    main_key_name = ''
    main_key_index_name = ''
    # Bounds-check the configured indices against the sheet dimensions.
    if schema["colindex_key"] > sheet.ncols:
        print_error("colindex_key is bigger than she sheet max cols: colindex_key = %d , sheet max nrows = %d" %(schema["colindex_key"] , sheet.nrows))
        #sys.exit(1)
        return
    if schema["colindex_start"] > sheet.ncols:
        print_error("rowindex_start is bigger than she sheet max rows: colindex_start = %d , sheet max ncols = %d" %(schema["colindex_start"] , sheet.ncols))
        #sys.exit(1)
        return
    if schema["colindex_type"] > sheet.ncols:
        print_error("colindex_type is bigger than she sheet max cols: rowindex_type = %d , sheet max cols = %d" %(schema["colindex_type"] , sheet.ncols))
        #sys.exit(1)
        return
    # Configured indices are 1-based; convert to 0-based cell coordinates.
    rowindex_start = 0
    if "rowindex_start" in schema:
        rowindex_start = schema["rowindex_start"] - 1
    colindex_key = 0
    if schema["colindex_key"] > 0:
        colindex_key = schema["colindex_key"] - 1
    colindex_type = 0
    if schema["colindex_type"] > 0:
        colindex_type = schema["colindex_type"] - 1
    # NOTE(review): plain dict here (the row-oriented converter uses an
    # OrderedDict) -- field order is not preserved on Python 2; confirm.
    attribute = {}
    variable_name_map = {}
    field_name_map = {}
    field_type_map = {}
    add = False
    if 1 == schema["auto_add_field"]:
        add = True
    #print rowindex_start
    # Pass 1: read the field-name / field-type columns.
    for row in range(sheet.nrows):
        row_native_name = sheet.cell(row , colindex_key).value
        # NOTE(review): row_name[0] raises IndexError on a blank name cell;
        # the row-oriented converter guards with a length check -- confirm.
        row_name = sheet.cell(row , colindex_key).value.upper()
        if row < rowindex_start or row_name[0] == '_':
            continue
        row_type_value = sheet.cell(row, colindex_type).value
        row_type_value = row_type_value.strip()
        type_value_array = row_type_value.split(':')
        if row_name in NameMap and "ENUM" != type_value_array[0].upper():
            print_error("row = %d the field %s is enum type but define %s type" % (row , row_native_name, type_value_array[0]))
        if type_value_array[0].upper() not in type_array:
            print_error("row = %d the field %s type %s is not exist" % (row, row_native_name, type_value_array[0]))
        attribute[row_name] = row
        if add and len(row_name) > 0 and row_name not in schema["define"]:
            # "TYPE:1" style tags mark nested (layer-1) fields.
            if len(type_value_array) > 1:
                schema["define"][row_name] = 1
            else:
                schema["define"][row_name] = 0
        variable_name_map[row_name] = get_var_name(row_native_name)
        field_name_map[row_name] = row_native_name
        field_type_map[row_name] = type_value_array[0].upper()
    colindex_start = 0
    if schema["colindex_start"] > 0:
        colindex_start = schema["colindex_start"] - 1
    entities = []
    #key_dict = {}
    #key = ''
    colindex_end = sheet.ncols
    if "colindex_end" in schema and schema["colindex_end"] - 1 < sheet.ncols:
        colindex_end = schema["colindex_end"] - 1
    #check attribute type
    '''
    for attribute_name in ((schema["define"])):
        if not attribute_name in attribute:
            continue;
        for col in range(colindex_start, colindex_end):
            data = sheet.cell(attribute[attribute_name] , col).value
            if not data or type(data) != types.FloatType:
                continue
            if (data - int(data)) > 0:
                schema["percentage"].append(attribute_name)
            if data < 0:
                schema["int_type"].append(attribute_name)
    '''
    #print schema["define"]
    #print colindex_start , colindex_end
    black_num = 0
    # Pass 2: read the data columns into layered entity dicts.
    # NOTE(review): only ONE data column is processed (colindex_start to
    # colindex_start+1) although colindex_end is computed above and never
    # used -- looks like it should be range(colindex_start, colindex_end);
    # confirm before changing.
    for col in range(colindex_start, colindex_start+1):
        layer_dict = {}
        for attribute_name, row in attribute.items():
            if attribute_name in schema["define"]:
                data = sheet.cell(attribute[attribute_name] , col).value
                #if attribute_name in schema["millisecond"]:
                #if data:
                #if type(data) is types.UnicodeType or type(data) is types.StringType:
                #print_error("the percentage field %s is not int type: value = %s " % (attribute_name , data))
                #sys.exit(1)
                #data = data * 1000
                #native_field = False
                #if attribute_name in schema["native_field"]:
                #native_field = True
                #data = convert_encode(data , True)
                # Accept a literal 0 but skip genuinely empty cells.
                if data or 0 == data :
                    if not check_value_type(attribute_name, data, field_type_map[attribute_name], schema["separator"]):
                        print_error("the value type is not match! attribute_name = %s , data = %s type = %s define type = %s " % (attribute_name, str(data), type(data), field_type_map[attribute_name]))
                    #if field_type_map[attribute_name] == "DATE":
                    #data = convert_data(data)
                    #elif field_type_map[attribute_name] == "TIME":
                    #data = convert_time(data)
                    check_continue_field(attribute_name , data , schema , col)
                    if attribute_name in schema["percentage"]:
                        if type(data) is types.UnicodeType or type(data) is types.StringType:
                            print_error("the percentage field %s is not int type: value = %s " % (attribute_name , data))
                            #sys.exit(1)
                        # Percent fields are stored as 1/10000 fixed point.
                        data = data * 10000
                    data = get_attribute_value(schema, attribute_name, data, field_type_map[attribute_name])
                    if not check_attribute_value(attribute_name , data , main_key_name , main_key_index_name , schema , col):
                        continue
                    layer_index = schema["define"][attribute_name]
                    if layer_index not in layer_dict:
                        layer_dict[layer_index] = OrderedDict()
                    layer_dict[layer_index][attribute_name] = data
                else:
                    if attribute_name not in schema["placeholder_field"]:
                        log ("file %s : %s row is not exists" % (schema["sheet_name"] , attribute_name))
        if len(layer_dict) == 0:
            # Stop after more than 10 consecutive blank columns.
            print "the column data is empty , row = %d" % (col+1)
            black_num += 1
            if black_num > 10:
                break
            continue
        else:
            black_num = 0
        # Chain the per-layer dicts into a row_dict -> xml_child structure.
        layer_begin= ''
        row_dict = {}
        for layer_index in layer_dict:
            if '' == layer_begin:
                key_index = "%d" % layer_index
                if key_index in schema["key"]:
                    if schema["key"][key_index].upper() not in layer_dict[layer_index]:
                        print_error(" main key: %s is not exist! layer_index = %d col = %d" % (schema["key"][key_index] , layer_index , col+1) , 0)
                        print_error(layer_dict[layer_index])
                        #sys.exit(1)
                layer_begin = layer_index
                row_dict = layer_dict[layer_index].copy()
                current_dict = row_dict
            else:
                current_dict["xml_child"] = []
                current_dict["xml_child"].append(layer_dict[layer_index])
                current_dict = current_dict["xml_child"]
        entity_list = get_container(entities , layer_begin)
        entity_list.append(row_dict)
    global continue_filed_table
    continue_filed_table = {}
    file_name = sheet.name
    if "file_name" in schema:
        file_name = schema["file_name"]
    write_file = file_name.lower() + ".xml"
    if "node_name" not in schema:
        schema["node_name"] = []
    #print entities
    # Optional: replace the parsed entities with randomly generated ones.
    if "rand_seed" in schema :
        seed_array = []
        insert_rand = 0
        produce_count = 100
        seed_multple = 1
        for key in schema["rand_seed"]:
            seed_array.append(entities[0][key.upper()])
        if "insert_rand" in schema:
            insert_rand = entities[0][schema["insert_rand"].upper()]
        if "produce_count" in schema:
            produce_count = schema["produce_count"]
        if "seed_multple" in schema:
            seed_multple = schema["seed_multple"]
        for i in range(0 , len(seed_array)):
            seed_array[i] *= seed_multple
        if len(seed_array) > 0 :
            entities = random_table.random_card_group(seed_array , insert_rand , produce_count)
            # Generated tables expose a single synthetic "VALUE" field.
            variable_name_map["VALUE"] = "value"
            field_name_map["VALUE"] = "Value"
            schema["define"]["VALUE"] = 0
    single , cpp_schema = get_cpp_schema(entities , schema, field_type_map, "rand_seed" in schema)
    global Golbal_Config
    if "no_cpp" not in Golbal_Config and "no_cpp" not in schema :
        XmlToCpp( file_name, schema, cpp_schema, single, variable_name_map, field_type_map).write()
    # NOTE(review): nesting of the C# step relative to the no_cpp guard
    # is reconstructed; it may originally have been inside it.
    if "rand_seed" not in schema:
        XmlToCShape( file_name, schema, cpp_schema, single, field_name_map, NameMap, field_type_map).write()
    #if "rand_seed" not in schema:
    #table_name = schema["xls_name"][0:-5]
    #if exist_chinese(table_name):
    #table_name = schema["sheet_name"]
    #create_sql = create_table_sql(cpp_schema, schema, table_name, field_name_map)
    #update_sqllite(Golbal_Config["sqllite_name"] , table_name , create_sql, entities)
    if "rand_seed" not in schema:
        write_to_lua_file(file_name.lower(), entities, field_name_map, cpp_schema, schema, single, field_type_map)
    write_to_xml_file(write_file , entities , schema["node_name"], field_type_map)
    #XmlToCpp( file_name , schema["main_key"] , entities).write()
#XmlToCpp( file_name , schema["main_key"] , entities).write()
def GetTwoCount(entity, field_name):
    """Pick a count from an entity's weighted child table.

    Each child's *field_name* cell is "count|weight"; a child is chosen
    with probability weight/total and its count returned.  Entities with
    no "xml_child" list yield 0.  Malformed cells and an empty/zero
    weight table abort via print_error.

    Cleanup: the cumulative-weight accumulator was a local named
    ``random_table``, shadowing the imported random_table module;
    renamed to ``cumulative_weights``.
    """
    if "xml_child" not in entity:
        return 0
    config_table = []
    for item in entity["xml_child"]:
        config_table.append(item[field_name])
    num_table = []
    cumulative_weights = []
    total_random = 0
    for item in config_table:
        item_array = item.split('|')
        if len(item_array) > 1:
            total_random += int(item_array[1])
            num_table.append(int(item_array[0]))
            cumulative_weights.append(total_random)
        else:
            print_error("random size less than two")
    if total_random > 0:
        # Roulette-wheel selection over the cumulative weights.
        hint = random.randint(1, total_random)
        for i in range(0, len(cumulative_weights)):
            if hint <= cumulative_weights[i]:
                return num_table[i]
    print_error("GetTwoCount failure! total_random = %d size = %d" % (total_random, len(cumulative_weights)))
def sheet_to_xml_by_schema_row(sheet , schema):
global main_key_table
global NameMap
main_key_name = ''
main_key_index_name = ''
if schema["rowindex_key"] > sheet.nrows:
print_error("rowindex_key is bigger than she sheet max cols: rowindex_key = %d , sheet max cols = %d" %(schema["rowindex_key"] , sheet.ncols))
#sys.exit(1)
return
if schema["rowindex_type"] > sheet.nrows:
print_error("rowindex_type is bigger than she sheet max cols: rowindex_type = %d , sheet max cols = %d" %(schema["rowindex_type"] , sheet.ncols))
#sys.exit(1)
return
if schema["rowindex_start"] > sheet.nrows:
print_error("rowindex_start is bigger than she sheet max rows: rowindex_start = %d , sheet max nrows = %d" %(schema["rowindex_start"] , sheet.nrows))
#sys.exit(1)
return
rowindex_key = 0
if schema["rowindex_key"] > 0:
rowindex_key = schema["rowindex_key"] - 1
rowindex_type = 1
if schema["rowindex_type"] > 0:
rowindex_type = schema["rowindex_type"] - 1
attribute = OrderedDict()
variable_name_map = {}
variable_name_map["xml_child"] = "xml_child"
field_name_map = {}
field_name_map["xml_child"] = "xml_child"
field_type_map = {}
add = False
if 1 == schema["auto_add_field"]:
add = True
begin_col_index = 0
if "colindex_start" in schema:
begin_col_index = schema["colindex_start"]
for col in range(sheet.ncols):
col_native_name = sheet.cell(rowindex_key, col).value.strip()
col_name = col_native_name.upper()
if col < begin_col_index or 0 == len(col_native_name) or col_name[0] == '_':
continue
col_type_value = sheet.cell(rowindex_type, col).value
col_type_value = col_type_value.strip()
type_value_array = col_type_value.split(':')
if col_name in NameMap and not ("ENUM" == type_value_array[0].upper() or "ENUMARRAY" == type_value_array[0].upper()):
print_error("col = %d the field %s is enum type but define %s type" % (col , col_native_name, type_value_array[0]))
if type_value_array[0].upper() not in type_array:
print_error("col = %d the field %s type %s is not exist" % (col , col_native_name, type_value_array[0]))
attribute[col_name] = col
if add and len(col_name) > 0 and col_name[0] != '_' and col_name not in schema["define"]:
if len(type_value_array) > 1:
schema["define"][col_name] = 1
else:
schema["define"][col_name] = 0
variable_name_map[col_name] = get_var_name(col_native_name)
field_name_map[col_name] = col_native_name
field_type_map[col_name] = type_value_array[0].upper()
if "ID" == col_name:
schema["key"]["0"] = "ID"
main_key_name = col_name
main_key_index_name = schema["file_name"].lower()
main_key_index_name = main_key_index_name + "." + main_key_name.lower()
#log("main key = %s" % main_key_index_name)
main_key_table[main_key_index_name] = set()
primary_key_map[main_key_index_name] = " %s %s sheet %s field" % (schema["xls_name"] , schema["sheet_name"] , main_key_name)
rowindex_start = 0
if schema["rowindex_start"] > 0:
rowindex_start = schema["rowindex_start"] - 1
entities = []
#key_dict = {}
#key = ''
rowindex_end = sheet.nrows
if "rowindex_end" in schema and schema["rowindex_end"] - 1 < sheet.nrows:
rowindex_end = schema["rowindex_end"] - 1
#check attribute type
'''
for attribute_name in ((schema["define"])):
if not attribute_name in attribute:
continue;
for row in range(rowindex_start, rowindex_end):
data = sheet.cell(row, attribute[attribute_name]).value
if not data or type(data) != types.FloatType:
continue
if (data - int(data)) > 0:
schema["percentage"].append(attribute_name)
if data < 0:
schema["int_type"].append(attribute_name)
'''
black_num = 0
#print schema["native_field"]
#print schema["define"]
#print attribute
for row in range(rowindex_start, rowindex_end):
layer_dict = {}
#for attribute_name in ((schema["define"])):
for attribute_name, col in attribute.items():
if attribute_name in schema["define"]:
data = sheet.cell(row, col).value
#print "native = " , attribute_name not in schema["native_field"]
#print "pre data = " , data
#native_field = False
#if attribute_name in schema["native_field"]:
#native_field = True
#print attribute_name , native_field , data
#data = convert_encode(data , True)
#print "after data = " , data
#print "row = %d , col = %d data = " %(row, attribute[attribute_name]), data
if data or 0 == data :
#print "2row = %d , col = %d data = " %(row, attribute[attribute_name]), data
if not check_value_type(attribute_name, data, field_type_map[attribute_name], schema["separator"]):
#print_error("the value type is not match! attribute_name = %s , data = %s" % (attribute_name, str(data)))
print_error("the value type is not match! attribute_name = %s , data = %s type = %s define type = %s " % (attribute_name, str(data), type(data), field_type_map[attribute_name]))
#if field_type_map[attribute_name] == "DATE":
#data = convert_data(data)
#elif field_type_map[attribute_name] == "TIME":
#data = convert_time(data)
data = convert_encode(data , field_type_map[attribute_name])
check_continue_field(attribute_name , data , schema , row)
if attribute_name in schema["percentage"]:
if type(data) is types.UnicodeType or type(data) is types.StringType:
print_error("the percentage field %s is not int type: value = %s " % (attribute_name , data))
#sys.exit(1)
data *= 10000
data = get_attribute_value(schema, attribute_name, data, field_type_map[attribute_name])
#if "EFFECTIVENESSID" == attribute_name:
#print "EffectivenessId = " , data
if not check_attribute_value(attribute_name , data , main_key_name , main_key_index_name , schema , row):
continue
layer_index = schema["define"][attribute_name]
if layer_index not in layer_dict:
layer_dict[layer_index] = OrderedDict()
layer_dict[layer_index][attribute_name] = data
else:
if attribute_name not in schema["placeholder_field"]:
log( "file %s:%s column not exists" % (schema["sheet_name"] , attribute_name))
if len(layer_dict) == 0:
print "the row data is empty , row = %d" % (row+1)
black_num += 1
if black_num > 10:
break
continue
else:
black_num = 0
layer_begin= ''
row_dict = {}
for layer_index in layer_dict:
if '' == layer_begin:
key_index = "%d" % layer_index
if key_index in schema["key"]:
if schema["key"][key_index].upper() not in layer_dict[layer_index]:
print_error(" main key: %s is not exist! layer_index = %d row = %d" % (schema["key"][key_index] , layer_index , row+1) , 0)
print_error("%s" % layer_dict[layer_index])
#log(" main key: %s is not exist! layer_index = %d row = %d" % (schema["key"][key_index] , layer_index , row+1))
#log("%s" % layer_dict[layer_index])
#sys.exit(1)
layer_begin = layer_index
row_dict = layer_dict[layer_index].copy()
current_dict = row_dict
else:
current_dict["xml_child"] = []
current_dict["xml_child"].append(layer_dict[layer_index])
current_dict = current_dict["xml_child"]
entity_list = get_container(entities , layer_begin)
entity_list.append(row_dict)
global continue_filed_table
continue_filed_table = {}
file_name = sheet.name
if "file_name" in schema:
file_name = schema["file_name"]
#print "2 file name %s " %(file_name)
write_file = file_name.lower() + ".xml"
if "node_name" not in schema:
schema["node_name"] = []
global Golbal_Config
#print len(entities.items())
if sheet.name in Golbal_Config["random_box"]:
#variable_name_map = {}
#field_name_map = {}
#
id_type = field_type_map["ID"]
#id_value = schema["define"]["ID"]
field_type_map = {}
field_type_map["VALUE"] = "INTARRAY"
variable_name_map["VALUE"] = "value"
field_name_map["VALUE"] = "Value"
del schema["define"]
schema["define"] = {}
schema["key"] = {}
#schema["define"]["ID"] = id_value
schema["define"]["VALUE"] = 0
config = {}
config["produce_count"] = Golbal_Config["random_sequence_count"]
for entity in entities:
config["min_loop_count"] = "BOX_MINLOOP" in entity and entity["BOX_MINLOOP"] or 0
config["max_loop_count"] = "BOX_MAXLOOP" in entity and entity["BOX_MAXLOOP"] or 0
config["one_percent"] = "BOX_MAXLOOP" in entity and entity["BOX_MAXLOOP"] or 0
config["two_count"] = GetTwoCount(entity, "BOX_PBOXWEIGHT")
config["three_count"] = "BOX_OBOXNUM" in entity and entity["BOX_OBOXNUM"] or 0
entities = random_table.random_box(config)
break
#write_to_xml_file(write_file , entities , schema["node_name"])
#return # only one
if sheet.name in Golbal_Config["random_card"]:
variable_name_map["VALUE"] = "value"
file_list = []
config = {}
config["produce_count"] = Golbal_Config["random_sequence_count"]
for entity in entities:
config["min_loop_count"] = "CARD_MINLOOP" in entity and entity["CARD_MINLOOP"] or 0
config["max_loop_count"] = "CARD_MAXLOOP" in entity and entity["CARD_MAXLOOP"] or 0
config["one_percent"] = 0
config["two_count"] = GetTwoCount(entity, "CARD_PCARDWEIGHT")
config["three_count"] = "CARD_OCARDNUM" in entity and entity["CARD_OCARDNUM"] or 0
value_list = random_table.random_box(config)
write_file = file_name.lower()
write_file += "_"
write_file += str(entity["ID"])
write_file += ".xml"
write_to_xml_file(write_file , value_list , schema["node_name"], field_type_map)
file_item = {}
file_item["FILE_NAME"] = write_file
file_item["ID"] = str(entity["ID"])
file_list.append(file_item)
write_file = file_name.lower() + ".xml"
write_to_xml_file(write_file , file_list , schema["node_name"], field_type_map)
return
#if "rand_seed" in schema:
# entities = random_table.random_card_group([9,18,180] , 10 , 100)
# variable_name_map["VALUE"] = "value"
single , cpp_schema = get_cpp_schema(entities, schema, field_type_map, "rand_seed" in schema)
if "no_cpp" not in Golbal_Config and "no_cpp" not in schema :
XmlToCpp( file_name, schema, cpp_schema, single, variable_name_map,field_type_map).write()
if sheet.name not in Golbal_Config["random_box"]:
XmlToCShape( file_name, schema, cpp_schema, single, field_name_map, NameMap, field_type_map).write()
#if "rand_seed" not in schema:
#table_name = schema["xls_name"][0:-5]
#if exist_chinese(table_name):
#table_name = schema["sheet_name"]
#create_sql = create_table_sql(cpp_schema, schema, table_name , field_name_map)
#update_sqllite(Golbal_Config["sqllite_name"] , table_name , create_sql, entities)
if "rand_seed" not in schema:
write_to_lua_file(file_name.lower(), entities, field_name_map, cpp_schema, schema, single, field_type_map)
write_to_xml_file(write_file , entities , schema["node_name"], field_type_map)
#XmlToCpp( file_name , schema["main_key"] , entities).write()
def parse_excel_by_schema(schema, excel_path):
    """Open the workbook named in `schema` and convert its matching sheet.

    Returns 1 when the sheet was found and converted, 0 otherwise
    (missing file or missing sheet).
    """
    global main_key_table
    global primary_key_map
    global Golbal_Config
    # A per-schema excel_path overrides the caller-supplied directory.
    if "excel_path" in schema:
        excel_path = schema["excel_path"]
    file_name = os.path.join(excel_path , schema["xls_name"])
    #log("parse file_name = %s " % file_name)
    print "--------------------------------------------"
    print "1 file_name = %s " % file_name
    if not os.path.isfile(file_name):
        print "the xls file is not exist! excel file name = %s , sheet name = %s " % (file_name , schema["sheet_name"])
        log("the xls file is not exist! excel file name = %s sheet name = %s " % (file_name , schema["sheet_name"]))
        return 0
    workbook = open_workbook(file_name)
    print "total sheets = %d" % workbook.nsheets
    for sheet in workbook.sheets():
        # Sheet names are matched case-insensitively.
        if sheet.name.upper() == schema["sheet_name"].upper():
            print "sheet name = %s " % (schema["sheet_name"])
            #log("sheet name = %s " % (schema["sheet_name"]))
            if sheet.name.upper() in Golbal_Config["col_config"]:
                # Column-oriented sheet layout.
                # NOTE(review): "1 or ..." makes every override unconditional,
                # so any per-schema index is always replaced by these fixed
                # values - looks like a debug leftover; confirm intent.
                if 1 or "colindex_key" not in schema:
                    schema["colindex_key"] = 2
                if 1 or "colindex_start" not in schema:
                    schema["colindex_start"] = 4
                if 1 or "colindex_type" not in schema:
                    schema["colindex_type"] = 1
                if 1 or "rowindex_start" not in schema:
                    schema["rowindex_start"] = 3
                sheet_to_xml_by_schema_col(sheet , schema)
            else:
                # Row-oriented sheet layout (the default path).
                if 1 or "rowindex_key" not in schema:
                    schema["rowindex_key"] = 3
                if 1 or "rowindex_start" not in schema:
                    schema["rowindex_start"] = 4
                if 1 or "rowindex_type" not in schema:
                    schema["rowindex_type"] = 2
                sheet_to_xml_by_schema_row(sheet , schema)
            return 1
    log("convert xls: the sheet name is not exist! excel file name = %s sheet name = %s " % (file_name , schema["sheet_name"]))
    return 0
def field_name_upper(schema, excel_file_name, excel_full_file_name):
    """Normalize a per-sheet schema in place: fill in defaults and
    upper-case every configured field name so later lookups are
    case-insensitive.

    schema               -- per-sheet config dict, mutated in place
    excel_file_name      -- sheet/file base name (no extension)
    excel_full_file_name -- excel file name including the extension
    """
    global Golbal_Config
    # Basic identity defaults.
    if "xls_name" not in schema:
        schema["xls_name"] = excel_full_file_name
    if "sheet_name" not in schema:
        schema["sheet_name"] = excel_file_name
    if "file_name" not in schema:
        schema["file_name"] = excel_file_name
    if "node_name" not in schema:
        schema["node_name"] = [excel_file_name]
    if "Array_Config" in Golbal_Config and excel_file_name in Golbal_Config["Array_Config"]:
        schema["array_flag"] = 1
    if "key" not in schema:
        schema["key"] = {}
    if "define" not in schema:
        schema["define"] = {}
    # Upper-case the key values (e.g. {"0": "id"} -> {"0": "ID"}).
    for attribute_name, attribute_value in list(schema["key"].items()):
        schema["key"][attribute_name] = attribute_value.upper()
    # Upper-case the keys of "define". Iterate a snapshot because the
    # dict is mutated during the walk.
    for attribute_name, attribute_value in list(schema["define"].items()):
        field_name = attribute_name.upper()
        if field_name != attribute_name:
            schema["define"][field_name] = attribute_value
            del schema["define"][attribute_name]
    if "map" not in schema:
        schema["map"] = {}
    # Upper-case both keys and values of "map" (snapshot for the same reason).
    for attribute_name, attribute_value in list(schema["map"].items()):
        field_name = attribute_name.upper()
        field_value = attribute_value.upper()
        if field_name != attribute_name:
            schema["map"][field_name] = field_value
            del schema["map"][attribute_name]
    # Remaining defaults.
    if "int_type" not in schema:
        schema["int_type"] = []
    if "percentage" not in schema:
        schema["percentage"] = []
    if "separator" not in schema:
        schema["separator"] = ["|",",",";",":"]
    if "continue_field" not in schema:
        schema["continue_field"] = []
    if "array_field" not in schema:
        schema["array_field"] = []
    if "placeholder_field" not in schema:
        schema["placeholder_field"] = []
    if "native_field" not in schema:
        schema["native_field"] = []
    if "auto_add_field" not in schema:
        schema["auto_add_field"] = 1
    if "random_box" not in schema:
        schema["random_box"] = []
    if "random_card" not in schema:
        schema["random_card"] = []
    if "random_sequence_count" not in schema:
        schema["random_sequence_count"] = 1
    # Upper-case every field-name list. BUG FIX: the old loops for
    # int_type/array_field/placeholder_field only kept names whose
    # upper-cased form DIFFERED from the original, silently dropping
    # entries that were already upper-case (native_field had the same
    # bug fixed earlier - its `if` was commented out). All lists are
    # now converted unconditionally.
    schema["native_field"] = [name.upper() for name in schema["native_field"]]
    schema["percentage"] = [name.upper() for name in schema["percentage"]]
    schema["int_type"] = [name.upper() for name in schema["int_type"]]
    schema["array_field"] = [name.upper() for name in schema["array_field"]]
    schema["placeholder_field"] = [name.upper() for name in schema["placeholder_field"]]
    if "sqllite_name" not in Golbal_Config:
        Golbal_Config["sqllite_name"] = "SQLLite" + os.sep + "sqllite.db"
    if "foreign_key" not in schema:
        schema["foreign_key"] = {}
    # Merge in global foreign keys declared as "<file>.<field>" whose file
    # part matches this sheet.
    for attribute_name, attribute_value in Golbal_Config["foreign_key"].items():
        upper_name = attribute_name.upper()
        if excel_file_name.upper() == upper_name[0:upper_name.index('.')]:
            field_name = upper_name[upper_name.index('.')+1:]
            schema["foreign_key"][field_name] = attribute_value
    # Upper-case foreign-key field names (snapshot: dict mutated in loop).
    for attribute_name, attribute_value in list(schema["foreign_key"].items()):
        field_name = attribute_name.upper()
        if field_name != attribute_name:
            del schema["foreign_key"][attribute_name]
            schema["foreign_key"][field_name] = attribute_value
def is_load(file_name):
    """True when `file_name` already has a registered main-key table,
    i.e. this excel file was converted earlier in the current run."""
    global main_key_table
    wanted = file_name.upper()
    for index_name in main_key_table:
        # keys look like "<file>.<field>"; compare the file part only
        if wanted == index_name[0:index_name.index('.')].upper():
            return True
    return False
def add_depend_file(file_name , depend_file_set):
    """Record the file part of a "<file>.<field>" foreign-key reference.

    References written as "name:..." are deliberately skipped; anything
    with neither '.' nor ':' is reported as a malformed foreign key."""
    dot_index = file_name.find('.')
    if dot_index < 0:
        if ':' in file_name:
            # colon-style references carry no file dependency
            return
        # print_error aborts the run by default
        print_error(" the foreign key format is wrong! value = %s , " % (file_name))
    depend_file_set.add(file_name[0:file_name.index('.')])
def get_depend_file2(excel_file_name):
    """Collect the base file names `excel_file_name` depends on, resolved
    straight from the global foreign-key table."""
    global Golbal_Config
    depend_file_set = set()
    target = excel_file_name.upper()
    for fk_name, fk_value in Golbal_Config["foreign_key"].items():
        upper_name = fk_name.upper()
        if upper_name[0:upper_name.index('.')] == target:
            #print_error("get_depend_file")
            if fk_value in Golbal_Config:
                # indirection: the value names a Golbal_Config section whose
                # keys are themselves "<file>.<field>" references
                for ref_name in Golbal_Config[fk_value]:
                    add_depend_file(ref_name , depend_file_set)
            else:
                add_depend_file(fk_value , depend_file_set)
    return depend_file_set
def get_depend_file(schema):
    """Collect the base file names this schema depends on via its
    (already normalized) foreign-key map."""
    global Golbal_Config
    depend_file_set = set()
    for fk_value in schema["foreign_key"].values():
        if fk_value in Golbal_Config:
            # indirection: the value names a Golbal_Config section whose
            # keys are themselves "<file>.<field>" references
            for ref_name in Golbal_Config[fk_value]:
                add_depend_file(ref_name , depend_file_set)
        else:
            add_depend_file(fk_value , depend_file_set)
    return depend_file_set
def get_schema_config(excel_full_file_name, schema_config_array):
    """Return the per-sheet schema whose xls_name matches the given file
    name (case-insensitive); an empty dict when none matches."""
    wanted = excel_full_file_name.upper()
    for candidate in schema_config_array:
        if "xls_name" in candidate and candidate["xls_name"].upper() == wanted:
            return candidate
    return {}
def parse_impl(excel_file_name, excel_path, schema_config_array , layer = 1):
    """Recursively convert one excel file and the files it depends on.

    Returns (result, total_num, success_num): result is 1 when the excel
    file itself is missing, 0 otherwise; the counters accumulate over
    the whole dependency subtree.
    """
    result = 0
    total_num = 0
    success_num = 0
    # Skip files already converted in this run (also breaks dependency cycles).
    if is_load(excel_file_name):
        #print_error("has load %s" % excel_file_name)
        return result , total_num , success_num
    excel_full_file_name = ''
    file_name = os.path.join(excel_path , excel_file_name)
    # Prefer .xlsx, fall back to .xls.
    if os.path.isfile(file_name + '.xlsx'):
        excel_full_file_name = excel_file_name + '.xlsx'
    elif os.path.isfile(file_name + '.xls'):
        excel_full_file_name = excel_file_name + '.xls'
    else:
        # print_error exits by default, so the return below only runs if
        # print_error is invoked with assert_flag disabled elsewhere.
        print_error("the excel file %s is not exist" % (file_name + '.xlsx'))
        return 1 , total_num , success_num
    total_num += 1
    schema = get_schema_config(excel_full_file_name, schema_config_array)
    field_name_upper(schema, excel_file_name, excel_full_file_name)
    depend_file_set = get_depend_file(schema)
    #layer += 1
    #if layer > 3:
    #print "excel_file_name = %s , depend file" % excel_file_name
    #global main_key_table
    #print main_key_table
    #print_error(depend_file_set)
    # Convert every dependency first so foreign keys can be validated.
    for field_name in depend_file_set:
        result , totle , success = parse_impl(field_name, excel_path, schema_config_array, layer)
        if 1 == result:
            log("depend config is not exist! file = %s depend file = %s" % (file_name , field_name))
            continue
        total_num += totle
        success_num += success
    #print "parse excel %s" % excel_full_file_name
    #global main_key_table
    #print main_key_table
    success_num += parse_excel_by_schema(schema, excel_path)
    return 0 , total_num , success_num
def parse_config():
    """Load conf.ini plus the per-sheet .ini schemas, then convert every
    excel file under excel_path modified since the recorded parse time."""
    global config_name
    global Golbal_Config
    print "load %s file" % config_name
    # OrderedDict preserves declaration order of the json config.
    Golbal_Config = json.load(file(config_name),object_pairs_hook=OrderedDict)
    if "foreign_key" not in Golbal_Config:
        Golbal_Config["foreign_key"] = {}
    #sys.exit(1)
    excel_path = ''
    if "excel_path" in Golbal_Config:
        excel_path = Golbal_Config["excel_path"]
    # Normalize sheet-name lists to upper case for case-insensitive lookup.
    col_config_array = []
    for xls_name in Golbal_Config["col_config"]:
        field_name = xls_name.upper()
        col_config_array.append(field_name)
    Golbal_Config["col_config"] = col_config_array
    no_parse_array = []
    for xls_name in Golbal_Config["no_parse"]:
        field_name = xls_name.upper()
        no_parse_array.append(field_name)
    Golbal_Config["no_parse"] = no_parse_array
    global NameMap
    global log_string
    NameMap = excel2type.parse_name_map_config(excel_path)
    #print NameMap
    #return
    #log("%s" % Golbal_Config["item_conf.id"].items())
    last_parse_time = 0
    ISOTIMEFORMAT='%Y-%m-%d %X'
    # NOTE(review): mode 'a+' typically positions reads at end-of-file, so
    # readline() may return '' and the stored timestamp gets ignored -
    # confirm whether 'r' plus an existence check was intended here.
    fp_r = open("last_parse_time",'a+')
    if fp_r:
        line_str = fp_r.readline()
        if line_str:
            #last_parse_time = int(line_str)
            last_parse_time = int(time.mktime(time.strptime(line_str, ISOTIMEFORMAT)))
            pass
        fp_r.close()
    #if "last_parse_time" not in Golbal_Config:
    #Golbal_Config["last_parse_time"] = 0
    # Load every per-sheet schema (.ini files contain json documents).
    schema_config_array = []
    for item in os.listdir(Golbal_Config["config_dir"]):
        if item[-4:] == '.ini':
            file_name = os.path.join(Golbal_Config["config_dir"] , item)
            schema = json.load(file(file_name) ,object_pairs_hook=OrderedDict)
            schema_config_array.append(schema)
    result = 1
    total_num = 0
    success_num = 0
    #result , totle , success = parse_impl(item, excel_path, False)
    #total_num += totle
    #success_num += success
    for item in os.listdir(excel_path):
        # Skip Office lock files ("~...") and anything that is not .xls/.xlsx.
        if item[0] <> '~' and (item[-4:] == '.xls' or item[-5:] == '.xlsx') :
            #print "\n\n item = %s \n\n" % item
            xls_file_name = os.path.join(excel_path , item)
            excel_file_attribute = os.stat(xls_file_name)
            # Incremental build: only convert files modified after the last run.
            if excel_file_attribute.st_mtime <= last_parse_time:
                continue
            index = item.find('.')
            excel_name = item[0:index]
            if excel_name.upper() in Golbal_Config["no_parse"]:
                continue
            result , totle , success = parse_impl(excel_name, excel_path, schema_config_array)
            total_num += totle
            success_num += success
    print "convert xls: total config = %d ! convert success = %d " % (total_num , success_num)
    # Record this run's wall-clock time for the next incremental build.
    fp_w = open("last_parse_time",'w')
    current_time = int(time.time())
    fp_w.write(time.strftime(ISOTIMEFORMAT, time.localtime()))
    fp_w.close()
def parse():
    """Entry point: take an optional config path from argv, run the whole
    conversion, then dump the accumulated log messages to error.txt."""
    global config_name
    if len(sys.argv) > 1 :
        config_name = sys.argv[1]
    #print "Convert excel files to json file begin"
    parse_config()
    file_name = "error.txt"
    fid = open(file_name , 'wb')
    fid.write(log_string)
    fid.close()
    #print "\n"
    # print('\033[1;31;40m')
    # Echo the collected warnings/errors in red on the console as well.
    print_color.print_red_text(log_string)
    #print('\033[0m')
    #print "\n"
    #list_excel_dir()
    #print "Convert excel files to json file end"
if __name__ == '__main__':
    parse()
# Dead code below: a command-line usage notice disabled by wrapping it in
# a bare (unassigned) string literal.
'''
if len(sys.argv) != 2 :
print "usage: %s [input xls file]" % sys.argv[0]
print "e.g. : %s myinput.xls" % sys.argv[0]
print "Note : the input file should be MS excel doc.\n"
sys.exit()
'''
# ===== xml2cshape.py =====
#encoding=GBK
import os
import sys
import re
import types
################################ CPP Head ######################################
# Template fragments for the generated C# source file. All of them are
# filled in with the old-style % operator; the literal braces belong to
# the generated C# code, not to str.format.
# Outer file skeleton: class name, member-class bodies, xml element name,
# member variable, key-lookup function (in that %s order).
define_body = '''\
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Reflection;
using GameCore.Common;
using System.Xml;
using System.Xml.Serialization;
using GameCore.Utils;
namespace GameCore.Database.Db
{
[XmlRoot("root")]
public class %s
{
%s
[XmlElement("%s")]
%s
%s
}
}
'''
# Nested row type used for child ("xml_child") layers.
child_class = '''\
public class LevelRow
{
%s
}
'''
# Top-level row type.
farther_class = '''\
public class Row
{
%s
}
'''
# Linear-scan lookup by the layer-0 key column.
key_func = '''\
public %s.Row getRow(%s key)
{
for (int i = 0; i < Rows.Length; ++i)
{
var row = Rows[i];
if (row.%s == key)
{
return row;
}
}
return null;
}
'''
# Per-property [XmlAttribute] tag (despite the name).
xml_enum = '\t\t\t[XmlAttribute("%s")]\r\n'
# Scalar property declarations, one per C# type.
string_var ='''\t\t\tpublic string %s { get; set; }
'''
bool_var ='''\t\t\tpublic bool %s { get; set; }
'''
int_var ='''\t\t\tpublic int %s { get; set; }
'''
float_var ='''\t\t\tpublic float %s { get; set; }
'''
enum_var = '''\t\t\tpublic %s %s { get; set; }
'''
xml_element= '\t\t\t[XmlElement("%s")]\r\n'
# Child-layer collection property.
array_var = '''\t\t\tpublic LevelRow[] LevelRow { get; set; }
'''
# Lazily-parsed array property backed by the raw string field _%s.
vector_var ='''
public string _%s { get; set; }
private XMLValueArray<%s> __%s__;
public XMLValueArray<%s> %s
{
get
{
if (__%s__ == null)
{
__%s__ = _%s;
}
return __%s__;
}
}
'''
# Table storage: array of rows, or a single row for "single" tables.
list_member_var = '''public Row[] Rows; '''
one_member_var = '''public Row oneRow; '''
#################################################################################
def is_numeric(s):
    '''returns True if string s is numeric'''
    # Every character must come from the digit/sign/point set; note that
    # the empty string therefore counts as numeric.
    return set(s) <= set("0123456789.+-")
class XmlToCShape(object):
def __init__(self, config_name = "test" , schema = [] , cpp_schema = {} , single = 0 , variable_name_map = {}, NameMap = {}, field_type_map = {}, root_node = "root"):
self.config_name = config_name
self.macro_name = config_name[0:].upper()
self.cpp_name = ''
#strlist = config_name.split('_')
#for value in strlist:
#self.cpp_name += value[0].upper() + value[1:].lower()
#print "cconfig_name : %s " % config_name
self.cpp_name = config_name#self.ConvertName(config_name)
self.schema = schema
self.cpp_schema = cpp_schema
self.single = single
self.root_node = root_node
self.key = ''
if "key" not in schema:
schema["key"] = {}
elif "0" in schema["key"]:
self.key = schema["key"]["0"]
if "separator" not in self.schema:
self.schema["separator"] = []
self.static_member_define = ''
self.variable_name_map = variable_name_map
self.NameMap = NameMap
self.field_type_map = field_type_map
def ConvertName(self , name):
dest_name = ''
strlist = name.split('_')
for value in strlist:
dest_name += value[0].upper() + value[1:]#.lower()
return dest_name
def get_node_name(self , layer_index):
node_name = 'item'
if layer_index < len(self.schema['node_name']):
node_name = self.schema['node_name'][layer_index]
return node_name
def get_child_key_type(self , data = {} , child_layer = 0):
child_layer_string = "%d" % child_layer
if child_layer_string in self.schema["key"]:
key_name = self.schema["key"][child_layer_string]
if key_name in data:
if type(data[key_name]) is types.UnicodeType or type(data[key_name]) is types.StringType:
return "string"
elif type(data[key_name]) is types.FloatType:
return "float"
return "int"
    def is_separator_string(self , key , string_var):
        """Classify a raw cell string as an array candidate.

        Returns (value_type, separator) where value_type is
        0 = plain string (no separator, not an array field),
        1 = int array, 2 = string array, 3 = float array.
        """
        # NOTE(review): indentation reconstructed - as read here the inner
        # return fires on the first numeric-looking element; confirm
        # against the original file.
        for separator in self.schema["separator"]:
            index = string_var.find(separator)
            if index >=0:
                #temp = string_var[0:index]
                temp_set = string_var.split(separator)
                value_type = 2
                for temp in temp_set:
                    if is_numeric(temp):
                        value_type = 1
                        if '.' in temp:
                            value_type = 3
                        return value_type,separator
                    #if temp.isdigit():
                return value_type,separator
                #if is_numeric(temp):
                #return 1 , separator
                #else:
                #return 2 , separator
        separator = ','
        #print "is_separator_string %s" % key
        # Fields declared in array_field count as arrays even without a
        # separator in the sample value.
        if key in self.schema["array_field"]:
            # NOTE(review): any non-empty value returns 1 here, and the
            # empty string also satisfies is_numeric('') (all() over an
            # empty sequence is True) - so the `return 2` branch looks
            # unreachable; confirm the intended condition.
            if len(string_var) or is_numeric(string_var) :
                return 1 , separator
            else:
                return 2 , separator
        return 0 , ''
    def define_struct_data(self , data = {} , layer_index = 0):
        """Build the C# member declarations for one layer of a row dict.

        `data` maps column name -> sample value; the sample's python type
        decides the generated C# property type. Child layers (under
        "xml_child") become nested LevelRow classes emitted before their
        parent class in the returned string.
        """
        child_struct = ''
        struct_body = ''
        for key , value in data.items():
            var = self.variable_name_map[key]
            if "class" == var:
                # "class" is a C# keyword; rename the property
                var = "type"
            if key == "xml_child":
                # recurse into the nested layer, expose it as LevelRow[]
                child_layer = layer_index+1
                child_struct = self.define_struct_data(value , child_layer)
                struct_body += xml_element % "item"
                struct_body += array_var
            elif key in self.NameMap and self.field_type_map[key.upper()] != "ENUMARRAY":
                # enum-typed column: property typed with the enum name
                struct_body += xml_enum % key
                struct_body += enum_var % (var, var)
            elif type(value) is types.IntType:
                struct_body += xml_enum % key
                if "BOOL" == self.field_type_map[key.upper()]:
                    struct_body += bool_var % var
                else:
                    struct_body += int_var % var
            elif type(value) is types.FloatType:
                struct_body += xml_enum % key
                struct_body += float_var % var
            elif key in self.schema["array_field"]:
                # declared array column: element type inferred from sample
                ret = 1
                separator = ','
                struct_body += xml_enum % key
                if type(value) is types.UnicodeType or type(value) is types.StringType:
                    ret , separator = self.is_separator_string(key , value)
                # NOTE(review): dead branch - float values were already
                # consumed by the elif above, so this can never fire
                if type(value) is types.FloatType:
                    ret = 3
                if 1 == ret:
                    struct_body += vector_var % (var, 'int', var, 'int' , var
                        , var, var, var, var)
                elif 2 == ret:
                    struct_body += vector_var % (var, 'string', var, 'string' , var
                        , var, var, var, var)
                elif 3 == ret:
                    struct_body += vector_var % (var, 'float', var, 'float' , var
                        , var, var, var, var)
                else:
                    struct_body += string_var % var
            elif type(value) is types.UnicodeType or type(value) is types.StringType:
                # plain string column, unless it contains a separator (then
                # it becomes a typed XMLValueArray property)
                struct_body += xml_enum % key
                ret , separator = self.is_separator_string(key , value)
                if 1 == ret:
                    struct_body += vector_var % (var, 'int', var, 'int' , var
                        , var, var, var, var)
                elif 2 == ret:
                    struct_body += vector_var % (var, 'string', var, 'string' , var
                        , var, var, var, var)
                elif 3 == ret:
                    struct_body += vector_var % (var, 'float', var, 'float' , var
                        , var, var, var, var)
                else:
                    struct_body += string_var % var
            else:
                print "C# define_struct_data:the type is not deal: type = %s" % type(value)
        # wrap the members in Row (top layer) or LevelRow (child layers)
        class_define = farther_class
        if layer_index > 0:
            class_define = child_class
        struct_body = class_define % struct_body
        struct_body = child_struct + struct_body
        return struct_body
    def write(self):
        """Render the complete C# file and save it as CShape/<name>.cs."""
        if len(self.cpp_schema) == 0 :
            print 'the cpp schema is empty: cpp schema = %s' % (self.cpp_schema)
            return
        single = self.single
        var_body = ''
        # C# type of the layer-0 key column, used by the getRow() helper.
        key_type = self.get_child_key_type(self.cpp_schema , 0)
        var_body = self.define_struct_data(self.cpp_schema)
        member_var = one_member_var
        memver_func = ''
        if not single:
            # multi-row table: expose Row[] plus a key lookup when a
            # layer-0 key is configured
            member_var = list_member_var
            if "0" in self.schema["key"]:
                memver_func = key_func % (self.cpp_name, key_type, self.variable_name_map[self.schema["key"]["0"]])
        file_body = define_body % ( self.cpp_name, var_body, self.schema["node_name"][0], member_var, memver_func)
        file_name = 'CShape'+ os.sep + self.cpp_name + '.cs'
        fid = open(file_name, 'wb')
        fid.write(file_body)
        fid.close()
        print "create cpp file : %s " % file_name