A stack is a last-in, first-out (LIFO) data structure.
stack = []

def pushit():
    stack.append(input('Enter your new string:').strip())

def popit():
    if len(stack) == 0:
        print("You can't pop from an empty stack!")
    else:
        print("Removed " + stack.pop())

def viewstack():
    print(stack)
CMDS = {'p': pushit, 'o':popit, 'q': quit, 'v': viewstack}
def showmenu():
    re = '''
(p)ush
p(o)p
(v)iew
(q)uit
'''
    while True:
        while True:
            try:
                choice = input(re).strip()[0].lower()
            except (EOFError, KeyboardInterrupt, IndexError):
                choice = 'q'
            if choice not in 'povq':
                print('Invalid option, try again')
            else:
                break
        CMDS[choice]()
showmenu()
(p)ush
p(o)p
(v)iew
(q)uit
p
Enter your new string:n
(p)ush
p(o)p
(v)iew
(q)uit
p
Enter your new string:sr
(p)ush
p(o)p
(v)iew
(q)uit
v
['n', 'sr']
(p)ush
p(o)p
(v)iew
(q)uit
p
Enter your new string:v
(p)ush
p(o)p
(v)iew
(q)uit
v
['n', 'sr', 'v']
(p)ush
p(o)p
(v)iew
(q)uit
o
Removed v
A queue is a first-in, first-out (FIFO) data structure.
The code is the same as the stack's: just change stack.pop() in the popit function to stack.pop(0) (a sketch follows).
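A minimal sketch of that queue version of popit, assuming the list is renamed to queue while the rest of the menu code stays the same; note that list.pop(0) is O(n), so collections.deque with popleft() is the more efficient choice for large queues:

queue = []

def popit():
    if len(queue) == 0:
        print("You can't pop from an empty queue!")
    else:
        # pop(0) removes the oldest element, giving first-in, first-out behaviour
        print("Removed " + queue.pop(0))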
'''A function that finds one path'''
# represent the graph with a dict mapping each node to a list of its neighbours
graph = {'A': ['B', 'C'],
         'B': ['C', 'D'],
         'C': ['A', 'D', 'F'],
         'D': ['C'],
         'E': ['F'],
         'F': ['C', 'G']}

def find_one_path(graph, start, end, path=[]):
    # Note the difference from path.append(start): append would grow the list shared
    # across recursive calls, whereas `path + [start]` builds a new list, so if this
    # branch of the recursion fails the caller's path is left unchanged.
    path = path + [start]
    print(path)
    if start == end:
        return path
    if start not in graph.keys():
        return None
    for node in graph[start]:
        if node not in path:
            new_path = find_one_path(graph, node, end, path)
            if new_path:
                return new_path
    return None
find_one_path(graph, 'A', 'G')
['A']
['A', 'B']
['A', 'B', 'C']
['A', 'B', 'C', 'D']
['A', 'B', 'C', 'F']
['A', 'B', 'C', 'F', 'G']
['A', 'B', 'C', 'F', 'G']
'''A function that finds all paths'''
'''
Same as the function above: first rename it to find_all_path, then create a global
list variable, and change the end of that function to
if new_path: all_path.append(new_path)
instead of returning new_path (a sketch follows the usage note below).
'''
'''
all_path = []
find_all_path(graph, 'A', 'G')
all_path
'''
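A minimal sketch of find_all_path as described above, using a global all_path list. The call that reaches the end node returns the finished path to its caller, which appends it; since nothing else is returned early, every branch is explored:

all_path = []

def find_all_path(graph, start, end, path=[]):
    path = path + [start]
    if start == end:
        return path
    if start not in graph:
        return None
    for node in graph[start]:
        if node not in path:
            new_path = find_all_path(graph, node, end, path)
            # collect the finished path instead of returning it, then keep exploring
            if new_path:
                all_path.append(new_path)
    return None

find_all_path(graph, 'A', 'G')
print(all_path)   # every simple path from 'A' to 'G'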
'''
As for a shortest-path function, one way is simply to pick the shortest path out of
that list (e.g. min(all_path, key=len)). A more concise way: since the code above no
longer returns new_path, it keeps finding feasible paths one after another, so just
set a local variable shortest_path (initially None) and compare each new_path against
it, keeping the shorter one (see the sketch below).
'''
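And a minimal sketch of the second, more concise approach (find_shortest_path is an illustrative name): the recursion is the same, but a local shortest_path keeps only the best result seen so far:

def find_shortest_path(graph, start, end, path=[]):
    path = path + [start]
    if start == end:
        return path
    if start not in graph:
        return None
    shortest_path = None    # shortest path found so far from this node
    for node in graph[start]:
        if node not in path:
            new_path = find_shortest_path(graph, node, end, path)
            # keep new_path only if it beats the current best
            if new_path and (shortest_path is None or len(new_path) < len(shortest_path)):
                shortest_path = new_path
    return shortest_path

print(find_shortest_path(graph, 'A', 'G'))   # ['A', 'C', 'F', 'G']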
'''You do not really need any of that for the shortest path: in an unweighted graph, a breadth-first search finds the shortest path directly'''
class Graph(object):
    def __init__(self, *args, **kwargs):
        self.node_neighbors = {}
        self.visited = {}

    def add_nodes(self, nodelist):
        for node in nodelist:
            self.add_node(node)

    def add_node(self, node):
        if node not in self.nodes():
            self.node_neighbors[node] = []

    def add_edge(self, edge):
        u, v = edge
        if (v not in self.node_neighbors[u]) and (u not in self.node_neighbors[v]):
            self.node_neighbors[u].append(v)
            if u != v:
                self.node_neighbors[v].append(u)

    def nodes(self):
        return self.node_neighbors.keys()

    # depth-first traversal
    def depth_first_search(self, root=None):
        order = []
        def dfs(node):
            order.append(node)
            self.visited[node] = True
            print(self.visited)
            for n in self.node_neighbors[node]:
                if n not in self.visited:
                    dfs(n)
        if root:
            dfs(root)
        for node in self.nodes():
            if node not in self.visited:
                dfs(node)
        print(order)

    # breadth-first traversal
    def breadth_first_search(self, root=None):
        queue = []
        order = []
        def bfs():
            while len(queue) > 0:
                node = queue.pop(0)
                self.visited[node] = True
                print(self.visited)
                for n in self.node_neighbors[node]:
                    # in a tree a child never appears twice, but in a graph n may be
                    # the child of two parents, so also make sure n is not already
                    # waiting in the queue
                    if n not in self.visited and n not in queue:
                        queue.append(n)
                        order.append(n)
        if root:
            queue.append(root)
            order.append(root)
            bfs()
        print(order)
if __name__ == '__main__':
    g = Graph()
    g.add_nodes([i+1 for i in range(8)])
    g.add_edge((1, 2))
    g.add_edge((1, 3))
    g.add_edge((2, 4))
    g.add_edge((2, 5))
    g.add_edge((4, 8))
    g.add_edge((5, 8))
    g.add_edge((3, 6))
    g.add_edge((3, 7))
    g.add_edge((6, 7))
    print("nodes:", g.node_neighbors)
    # g.depth_first_search(1)
    g.breadth_first_search(1)
nodes: {1: [2, 3], 2: [1, 4, 5], 3: [1, 6, 7], 4: [2, 8], 5: [2, 8], 6: [3, 7], 7: [3, 6], 8: [4, 5]}
{1: True}
{1: True, 2: True}
{1: True, 2: True, 3: True}
{1: True, 2: True, 3: True, 4: True}
{1: True, 2: True, 3: True, 4: True, 5: True}
{1: True, 2: True, 3: True, 4: True, 5: True, 6: True}
{1: True, 2: True, 3: True, 4: True, 5: True, 6: True, 7: True}
{1: True, 2: True, 3: True, 4: True, 5: True, 6: True, 7: True, 8: True}
[1, 2, 3, 4, 5, 6, 7, 8]
In the breadth-first code above, list.pop(0) is used to simulate first-in, first-out. That is not very Pythonic; use collections.deque to model the queue instead.
from collections import deque

graph = {1: [2, 3], 2: [1, 4, 5], 3: [1, 6, 7], 4: [2, 8], 5: [2, 8], 6: [3, 7], 7: [3, 6], 8: [4, 5]}

def search_num():
    search_queue = deque()
    search_queue += graph[1]            # start the search from node 1's neighbours
    search = set()                      # nodes that have already been checked
    want = input('Input the number you want:')
    while search_queue:
        node = search_queue.popleft()   # FIFO: popleft() is O(1), unlike list.pop(0)
        print(node)
        if node not in search:
            search.add(node)
            if node_is_wanted(node, want):
                print('Found!')
                break
            else:
                search_queue += graph[node]

def node_is_wanted(num, want):
    if int(num) == int(want):
        return True
    else:
        return False
search_num()
Input the number you want:3
2
3
Found!
This is Dijkstra's algorithm, a method for finding the shortest path when every edge carries a weight; the weights must not be negative. It can be seen as an extension of breadth-first search.
It keeps updating the cost of each node, so it needs a hash table (a dict in Python) mapping each node to its cost, plus the basic graph dict, and a hash table of parent nodes to record the shortest path.
# Set-up: build the hash tables
graph = {}
graph['start'] = {}
graph['start']['a'] = 6
graph['start']['b'] = 2
graph['a'] = {}
graph['a']['fin'] = 1
graph['b'] = {}
graph['b']['a'] = 3
graph['b']['fin'] = 5
graph['fin'] = {}
# A node's cost is the shortest known distance from the start to that node, also kept in a hash table.
# Costs that are not yet known start at infinity; they are guaranteed to be updated later.
infinity = float('inf')
costs = {}
costs['a'] = 6
costs['b'] = 2
costs['fin'] = infinity
# A hash table of parent nodes records the best path. Initially only the start's direct
# neighbours have a parent, namely the start itself (those neighbours may in turn become
# parents of other nodes); every other node's parent starts as None.
parents = {}
parents['a'] = 'start'
parents['b'] = 'start'
parents['fin'] = None
processed = []
print(graph)
print(costs)
{'start': {'a': 6, 'b': 2}, 'a': {'fin': 1}, 'b': {'a': 3, 'fin': 5}, 'fin': {}}
{'a': 6, 'b': 2, 'fin': inf}
def find_min_node(costs):
    # return the unprocessed node with the lowest known cost
    min_cost = float('inf')
    min_cost_node = None
    for node in costs.keys():
        if min_cost > costs[node] and node not in processed:
            min_cost = costs[node]
            min_cost_node = node
    return min_cost_node

node = find_min_node(costs)
while node is not None:
    cost = costs[node]
    neighbors = graph[node]
    for n in neighbors.keys():
        new_cost = cost + neighbors[n]
        # relax the edge: a cheaper route to n has been found through node
        if new_cost < costs[n]:
            costs[n] = new_cost
            parents[n] = node
    processed.append(node)
    node = find_min_node(costs)
path = []
son = parents['fin']
path.append('fin')
while son != 'start':
    path.append(son)
    son = parents[son]
path.append('start')
path.reverse()
print(path)
['start', 'b', 'a', 'fin']
Binary search is an O(log n) algorithm; it requires the list to be sorted.
# First attempt with recursion. The search logic itself is fine,
# but because of the slicing, the printed index is relative to the
# current slice rather than to the original list.
def binary_search(l, item):
    low = 0
    high = len(l) - 1
    if low == high:
        return low
    else:
        mid = int((low + high)/2)
        if l[mid] > item:
            high = mid
            l = l[low: high]
            binary_search(l, item)
        elif l[mid] < item:
            low = mid + 1
            l = l[low: high + 1]
            binary_search(l, item)
        elif l[mid] == item:
            print(mid)
l = [1, 5, 9, 10, 25, 36, 42, 59, 88]
binary_search(l, 9)
0
# A simple, correct version (recursive)
def binary_search(left, right, l, item):
    if left == right:
        return left
    else:
        mid = (left + right)//2
        if l[mid] == item:
            return mid
        elif l[mid] > item:
            right = mid
        else:
            left = mid + 1
        return binary_search(left, right, l, item)
l = [5, 6, 7, 8, 9, 11, 21, 32, 42, 51]
print(binary_search(0, 9, l, 9))
4
# Now with a loop instead of recursion; a for loop does not fit here,
# so use a while loop. What should the loop condition be?
def binary_search(l, item):
    low = 0
    high = len(l) - 1
    while low <= high:
        mid = (low + high)//2
        if l[mid] == item:
            return mid
        elif l[mid] > item:
            high = mid - 1
        else:
            low = mid + 1
    return -1   # item is not in the list
l = [5, 6, 7, 8, 9, 11, 21, 32, 42, 51]
binary_search(l, 9)
4
Quicksort is widely regarded as one of the fastest sorting methods, with an average complexity of O(n log n), versus O(n²) for bubble sort.
def quick_sort(item):
    if len(item) < 2:
        return item
    else:
        mid = (len(item) - 1)//2    # use the middle element as the pivot
        less = [i for i in item if i < item[mid]]
        bigger = [i for i in item if i > item[mid]]
        equal = [i for i in item if i == item[mid]]
        return quick_sort(less) + equal + quick_sort(bigger)
l = [2, 1, 3, 1, 4, 5, 4, 3, 2]
quick_sort(l)
[1, 1, 2, 2, 3, 3, 4, 4, 5]
A greedy algorithm picks the locally optimal choice at the very first step and at every step after that.
It is well suited to (approximately) solving problems such as set covering and the travelling salesman problem.
rela = {}
rela['a'] = set(['b', 'c', 'd', 'e', 'f', 'g'])
rela['b'] = set(['a', 'c', 'd', 'e', 'f', 'g'])
rela['c'] = set(['a', 'b', 'd', 'e', 'g'])
rela['d'] = set(['a', 'b', 'c', 'e', 'g'])
rela['e'] = set(['a', 'b', 'c', 'd', 'g'])
rela['f'] = set(['a', 'b', 'g'])
rela['g'] = set(['a', 'b', 'c', 'd', 'e', 'f'])
# the friend circle of the person with the widest circle
max_rela = set()
# the people already chosen (those with the widest circles)
clear = set()
# first pass: find the widest friend circle and its owner
for k, v in rela.items():
    if len(v) > len(max_rela):
        max_rela = set(v)
        m_p = k
clear.add(m_p)
'''
Iterate over the widest friend circle: for each person in it, take the intersection
(new_covered) of that person's circle with the previous circle, and assign the largest
such intersection to max_rela. There is no need to check whether the current person is
already in clear, because each intersection removes both people themselves (a & b
excludes a and b), nor whether the people in clear are in this person's circle, because
the relation is symmetric: if you know me, I know you (which is why the table has to be
built correctly in the first place).
'''
while max_rela:
    covered = set()
    max_person = None
    if len(max_rela) == 1:
        clear = clear | max_rela
    for person in max_rela:
        new_covered = rela[person] & max_rela
        if len(new_covered) > len(covered):
            covered = new_covered
            max_person = person
    if max_person:
        clear.add(max_person)
    max_rela = covered
    print(max_rela)
clear
{'d', 'f', 'e', 'c', 'b'}
{'e', 'f', 'c', 'd'}
{'c', 'd'}
{'d'}
set()
{'a', 'b', 'c', 'd', 'e', 'g'}