author     Matt Strapp <strap012@umn.edu>    2021-04-26 10:53:43 -0500
committer  Matt Strapp <strap012@umn.edu>    2021-04-26 15:03:12 -0500
commit     d311af01feb32550aaae8638d4cc167948f5464c (patch)
tree       3c0b8606a7a5267e3e890a63b8565c5c27f10438
parent     actually add files (diff)
Rebase newer branch
Diffstat (limited to '')
-rw-r--r--  Algorithm.py  148
-rw-r--r--  Board.py  78
-rw-r--r--  DotsNBoxes.py  79
-rw-r--r--  Nodes.py  18
-rw-r--r--  python/GameState.py  144
-rw-r--r--  python/MCTS.py  151
-rw-r--r--  python/agent.py  55
-rw-r--r--  python/alphaBeta.py  30
-rw-r--r--  python/ann.py  170
-rw-r--r--  python/board.py  110
-rw-r--r--  python/dataStructures.py  32
-rw-r--r--  python/dotsandboxes/README.md  134
-rwxr-xr-x  python/dotsandboxes/dotsandboxesagent  5
-rw-r--r--  python/dotsandboxes/dotsandboxesagent.py  213
-rw-r--r--  python/dotsandboxes/dotsandboxescompete.py  212
-rw-r--r--  python/dotsandboxes/dotsandboxesserver.py  60
-rw-r--r--  python/dotsandboxes/requirements.txt  1
-rw-r--r--  python/dotsandboxes/static/dotsandboxes.css  10
-rw-r--r--  python/dotsandboxes/static/dotsandboxes.html  56
-rw-r--r--  python/dotsandboxes/static/dotsandboxes.js  454
-rw-r--r--  python/evaluate.py  240
-rwxr-xr-x  python/start.sh  7
-rw-r--r--  runnable_agent/Agent.jar  bin 0 -> 531380 bytes
-rw-r--r--  runnable_agent/dotsandboxesagent  3
-rw-r--r--  runnable_agent/final_ann  bin 0 -> 2712 bytes
-rw-r--r--  sandbox/game.py  282
-rw-r--r--  sandbox/pics/A.png  bin 409 -> 0 bytes
-rw-r--r--  sandbox/pics/B.png  bin 359 -> 0 bytes
-rw-r--r--  sandbox/pics/X.png  bin 453 -> 0 bytes
-rw-r--r--  sandbox/pics/block.png  bin 109 -> 0 bytes
-rw-r--r--  sandbox/pics/empty.png  bin 145 -> 0 bytes
-rw-r--r--  sandbox/pics/lineX.png  bin 110 -> 0 bytes
-rw-r--r--  sandbox/pics/lineXempty.png  bin 133 -> 0 bytes
-rw-r--r--  sandbox/pics/lineY.png  bin 110 -> 0 bytes
-rw-r--r--  sandbox/pics/lineYempty.png  bin 127 -> 0 bytes
35 files changed, 2087 insertions, 605 deletions
diff --git a/Algorithm.py b/Algorithm.py
deleted file mode 100644
index 99488a8..0000000
--- a/Algorithm.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# import MCTS
-import math
-from copy import deepcopy
-from time import clock
-from random import choice
-
-from GameState import GameState
-
-class Algo: # A class for defining algorithms used (minimax and alpha-beta pruning)
-
- def miniMax(State, Ply_num): # Function for the minimax algorithm
-
- for i in range(State.Current.dimY):
- for j in range(State.Current.dimX):
- if State.Current.Mat[i][j] == ' ' and (j, i) not in State.children:
- State.Make(j, i, True)
- if Ply_num < 2:
- return (i, j)
-
- Minimum_Score = 1000
- i = 0
-
-
- j = 0
- for k, z in State.children.items():
- Result = Algo.Maximum(z, Ply_num - 1, Minimum_Score)
- if Minimum_Score > Result:
- Minimum_Score = Result
- i = k[0]
- j = k[1]
-
- return (i, j)
-
-
- def Maximum(State, Ply_num, Alpha): # Alpha-beta pruning function for taking care of Alpha values
- if Ply_num == 0:
- return State.CurrentScore
-
- for i in range(State.Current.dimY):
- for j in range(State.Current.dimX):
- if State.Current.Mat[i][j] == ' ' and (j, i) not in State.children:
- State.Make(j, i, False)
-
- Maximum_Score = -1000
- i = 0
- j = 0
- for k, z in State.children.items():
- Result = Algo.Minimum(z, Ply_num - 1, Maximum_Score)
- if Maximum_Score < Result:
- Maximum_Score = Result
- if Result > Alpha:
- return Result
-
- return Maximum_Score
-
-
- def Minimum(State, Ply_num, Beta): # Alpha-beta pruning function for taking care of Beta values
- if Ply_num == 0:
- return State.CurrentScore
-
- for i in range(State.Current.dimY):
- for j in range(State.Current.dimX):
- if State.Current.Mat[i][j] == ' ' and (j, i) not in State.children:
- State.Make(j, i, True)
-
- Minimum_Score = 1000
- i = 0
- j = 0
- for k, z in State.children.items():
- Result = Algo.Maximum(z, Ply_num - 1, Minimum_Score)
- if Minimum_Score > Result:
- Minimum_Score = Result
- if Result < Beta:
- return Result
-
- return Minimum_Score
-
-# class MCTSNode(object):
-# """Monte Carlo Tree Node.
-# Each node encapsulates a particular game state, the moves that
-# are possible from that state and the strategic information accumulated
-# by the tree search as it progressively samples the game space.
-# """
-
-# def __init__(self, state, parent=None, move=None):
-# self.parent = parent
-# self.move = move
-# self.state = state
-
-# self.plays = 0
-# self.score = 0
-
-# self.pending_moves = state.get_moves()
-# self.children = []
-
-# def select_child_ucb(self):
-# # Note that each node's plays count is equal
-# # to the sum of its children's plays
-# def ucb(child):
-# win_ratio = child.score / child.plays \
-# + math.sqrt(2 * math.log(self.plays) / child.plays)
-# return win_ratio
-
-# return max(self.children, key=ucb)
-
-# def expand_move(self, move):
-# self.pending_moves.remove(move) # raises KeyError
-
-# child_state = deepcopy(self.state)
-# child_state.play_move(move)
-
-# child = MCTSNode(state=child_state, parent=self, move=move)
-# self.children.append(child)
-# return child
-
-# def get_score(self, result):
-# # return result
-# if result == 0.5:
-# return result
-
-# if self.state.player == 2:
-# if self.state.next_turn_player == result:
-# return 0.0
-# else:
-# return 1.0
-# else:
-# if self.state.next_turn_player == result:
-# return 1.0
-# else:
-# return 0.0
-
-# if self.state.next_turn_player == result:
-# return 0.0
-# else:
-# return 1.0
-
-# def __repr__(self):
-# s = 'ROOT\n' if self.parent is None else ''
-
-# children_moves = [c.move for c in self.children]
-
-# s += """Score ratio: {score} / {plays}
-# Pending moves: {pending_moves}
-# Children's moves: {children_moves}
-# State:
-# {state}\n""".format(children_moves=children_moves, **self.__dict__)
-
-# return s
\ No newline at end of file
diff --git a/Board.py b/Board.py
deleted file mode 100644
index 405ec23..0000000
--- a/Board.py
+++ /dev/null
@@ -1,78 +0,0 @@
-from random import *
-
-
-class Game: #A class for managing different situations and states happening in the game and on the board
- def __init__(self, Mat, dimX, dimY):
- self.Mat = Mat
- self.dimX = dimX
- self.dimY = dimY
-
- def Initiate(self): #initiating the game board with X and Y dimensions
- for i in range(0, self.dimY):
- R = []
- for j in range (0, self.dimX):
- if i % 2 == 1 and j % 2 == 1:
- R.append(1) # Assigning a random number from 1 to 9 to the spots in the board as the points
- elif i % 2 == 0 and j % 2 == 0:
- R.append('*') # printing asterisks as the dots in the board
- else:
- R.append(' ') # adding extra space for actions in the game
- self.Mat.append(R)
-
- def Get_matrix(self): # Board matrix
- ans = []
- for i in range(0, self.dimY):
- R = []
- for j in range(0, self.dimX):
- R.append(self.Mat[i][j])
- ans.append(R)
- return ans
-
- def Draw_mat(self): # Drawing the board marix as dots and lines
-
- if self.dimX > 9:
- print(" ", end='')
- print(" ", end='')
- for i in range(0, self.dimX):
- print(str(i), end=' ')
- print()
-
- if self.dimX > 9:
- print(" ", end='')
- print(" ", end='')
- for i in range(0, self.dimX + 1):
- print("___", end='')
- print()
- for j in range(self.dimY):
- if self.dimX > 9 and j < 10:
- print(" ", end='')
- print(str(j) + "| ", end='')
- for z in range(self.dimX):
- print(str(self.Mat[j][z]), end=' ')
- print()
- print(" _________________________\n")
-
- def Get_currentState(self):
- return Game(self.Get_matrix(), self.dimX, self.dimY)
-
- def action(self, i, j): # Applying the actions made by the human or the computer
- Sum = 0
-
- if j % 2 == 0 and i % 2 == 1:
- self.Mat[j][i] = '-'
- if j < self.dimY - 1:
- if self.Mat[j+2][i] == '-' and self.Mat[j+1][i+1] == '|' and self.Mat[j+1][i-1] == '|':
- Sum += self.Mat[j+1][i]
- if j > 0:
- if self.Mat[j-2][i] == '-' and self.Mat[j-1][i+1] == '|' and self.Mat[j-1][i-1] == '|':
- Sum += self.Mat[j-1][i]
-
- else:
- self.Mat[j][i] = '|'
- if i < self.dimX - 1:
- if self.Mat[j][i+2] == '|' and self.Mat[j+1][i+1] == '-' and self.Mat[j-1][i+1] == '-':
- Sum += self.Mat[j][i+1]
- if i > 0:
- if self.Mat[j][i-2] == '|' and self.Mat[j+1][i-1] == '-' and self.Mat[j-1][i-1] == '-':
- Sum += self.Mat[j][i-1]
- return Sum
\ No newline at end of file
diff --git a/DotsNBoxes.py b/DotsNBoxes.py
deleted file mode 100644
index 14cdd2a..0000000
--- a/DotsNBoxes.py
+++ /dev/null
@@ -1,79 +0,0 @@
-from random import *
-import collections
-from Algorithm import *
-from Board import *
-from Nodes import *
-import MCTS
-
-
-class DotsNBoxes: # A class for managing the moves made by the human and the computer
- def __init__(self, Board_Xdim, Board_Ydim, Ply_num):
- currentState = Game([], Board_Xdim, Board_Ydim)
- currentState.Initiate()
- self.State = Thing(currentState)
- self.Ply_num = Ply_num
- self.Score = 0
-
- def Human(self): # Defining the Human player and his actions/Choices
- self.State.Draw()
-
- # HumanX = int(input("Please enter the 'X' coordinate of your choice (an integer such as 4): "))
- # HumanY = int(input("Please enter the 'Y' coordinate of your choice (an integer such as 4): "))
- # if (HumanX, HumanY) not in self.State.children:
- # self.State.Make(HumanX, HumanY, False)
- # self.State = self.State.children[(HumanX, HumanY)]
-
- # else:
- # self.State = self.State.children[(HumanX, HumanY)]
-
- move = Algo.miniMax(self.State, 2)
-
- self.State = self.State.children[(move[0], move[1])]
-
- print("AI1 selected the following coordinates to play:\n" + "(" ,str(move[0]), ", " + str(move[1]), ")", end = "\n\n")
-
- print("Current Score =====>> Your Score - AI Score = " + str(self.State.CurrentScore), end = "\n\n\n")
-
- if len(self.State.children) == 0:
- self.State.Draw()
- self.Evaluation()
- return
-
- self.Computer()
-
-
- def Computer(self): # Defining the Computer player and its actions/Choices
- self.State.Draw()
-
- # Temporarily commented out. TODO hardcore MCTS into working then alternate comments lol
- move = Algo.miniMax(self.State, 3)
- self.State = self.State.children[(move[0], move[1])]
-
- # move = MCTS.MCTSGameController() # check what MCTSGameController returns?
-
-
- print("AI2 selected the following coordinates to play:\n" + "(" ,str(move[0]), ", " + str(move[1]), ")", end = "\n\n")
-
- print("Current Score =====>> Your Score - AI Score = " + str(self.State.CurrentScore), end = "\n\n\n")
-
- if len(self.State.children) == 0:
- self.State.Draw()
- self.Evaluation()
- return
-
- self.Human()
-
- def Evaluation(self): # Evaluation function for taking care of the final scores
- print("Stop this Madness!!!\n")
- if self.State.CurrentScore > 0:
- print("You won you crazy little unicorn!! You are the new hope for the mankind!" + str(self.State.CurrentScore))
- exit()
- elif self.State.CurrentScore < 0:
- print("!!! Inevitable Doom!!! You were crushed by the AI!! "+ str(self.State.CurrentScore))
- exit()
- else:
- print("Draw! Well Congratulations! you are as smart as the AI!")
- exit()
-
- def start(self):
- self.Human()
\ No newline at end of file
diff --git a/Nodes.py b/Nodes.py
deleted file mode 100644
index 51a256b..0000000
--- a/Nodes.py
+++ /dev/null
@@ -1,18 +0,0 @@
-class Thing: # A class for Node related operations
- def __init__(self, currentState):
- self.Current = currentState
- self.CurrentScore = 0
- self.children = {}
-
- def Make(self, i, j, player): # Function for generating a child node
- self.children[(i, j)] = Thing(self.Current.Get_currentState())
- mul = 1
- if player:
- mul *= -1
- self.children[(i, j)].CurrentScore = (self.children[(i, j)].Current.action(i, j) * mul) + self.CurrentScore
-
- def Populate(self, i, j, Child): # Function for adding a node
- self.children[(i,j)] = Child
-
- def Draw(self): # function for drawing the board
- self.Current.Draw_mat()
\ No newline at end of file
diff --git a/python/GameState.py b/python/GameState.py
new file mode 100644
index 0000000..eed8f36
--- /dev/null
+++ b/python/GameState.py
@@ -0,0 +1,144 @@
+from random import choice
+
+# Based on https://github.com/DieterBuys/mcts-player/
+
+class GameState(object):
+
+ def __init__(self):
+ self.next_turn_player = 1
+ self.player = None
+
+ @property
+ def game_result(self):
+ return None
+
+ def get_moves(self):
+ return set()
+
+ def get_random_move(self):
+ moves = self.get_moves()
+ return choice(tuple(moves)) if moves != set() else None
+
+ def play_move(self, move):
+ pass
+
+
+class DotsAndBoxesState(GameState):
+ def __init__(self, nb_rows, nb_cols, player):
+ super(DotsAndBoxesState, self).__init__()
+
+ self.nb_rows = nb_rows
+ self.nb_cols = nb_cols
+ rows = []
+ for ri in range(nb_rows + 1):
+ columns = []
+ for ci in range(nb_cols + 1):
+ columns.append({"v": 0, "h": 0})
+ rows.append(columns)
+ self.board = rows
+
+ self.score = {1: 0, 2: 0}
+ self.player = player
+ print("Player: ", player)
+
+ @property
+ def game_result(self):
+ def game_decided(nb_cols, nb_rows, scoreP, scoreO):
+ # the game is decided if the winner is already known even before the game is ended
+ # you're guaranteed to win the game if you have more than half of the total points that can be earned
+ total_points = nb_rows * nb_cols
+ if scoreP > total_points // 2 or scoreO > total_points // 2:
+ return True
+ else:
+ return False
+
+ # check if the board is full, then decide based on score
+ free_lines = self.get_moves()
+ player = self.player
+ opponent = self.player % 2 + 1
+
+ if not game_decided(self.nb_cols, self.nb_rows, self.score[player], self.score[opponent]) and len(free_lines) > 0:
+ return None
+ elif self.score[player] > self.score[opponent]:
+ return 1
+ elif self.score[player] < self.score[opponent]:
+ return 0
+ else:
+ return 0.5
+
+ def get_moves(self):
+ free_lines = []
+ for ri in range(len(self.board)):
+ row = self.board[ri]
+ for ci in range(len(row)):
+ cell = row[ci]
+ if ri < (len(self.board) - 1) and cell["v"] == 0:
+ free_lines.append((ri, ci, "v"))
+ if ci < (len(row) - 1) and cell["h"] == 0:
+ free_lines.append((ri, ci, "h"))
+ return set(free_lines)
+
+ def play_move(self, move):
+ r, c, o = move
+ assert move in self.get_moves()
+
+ # check if this move makes a box
+ makes_box = False
+ if o == "h":
+ if r - 1 >= 0:
+ # check above
+ if self.board[r-1][c]["h"] != 0 and self.board[r-1][c]["v"] != 0 and self.board[r-1][c+1]["v"] != 0:
+ makes_box = True
+ self.score[self.next_turn_player] += 1
+ if r + 1 <= self.nb_rows:
+ # check below
+ if self.board[r+1][c]["h"] != 0 and self.board[r][c]["v"] != 0 and self.board[r][c+1]["v"] != 0:
+ makes_box = True
+ self.score[self.next_turn_player] += 1
+
+ elif o == "v":
+ if c - 1 >= 0:
+ # check left
+ if self.board[r][c-1]["v"] != 0 and self.board[r][c-1]["h"] != 0 and self.board[r+1][c-1]["h"] != 0:
+ makes_box = True
+ self.score[self.next_turn_player] += 1
+
+ if c + 1 <= self.nb_cols:
+ # check right
+ if self.board[r][c+1]["v"] != 0 and self.board[r][c]["h"] != 0 and self.board[r+1][c]["h"] != 0:
+ makes_box = True
+ self.score[self.next_turn_player] += 1
+
+
+ # register move
+ self.board[r][c][o] = self.next_turn_player
+
+ if not makes_box:
+ # switch turns
+ self.next_turn_player = self.next_turn_player % 2 + 1
+
+ def __repr__(self):
+ str = ""
+ for r in range(self.nb_rows + 1):
+ for o in ["h", "v"]:
+ for c in range(self.nb_cols + 1):
+ if o == "h":
+ str += "."
+ if c != self.nb_cols:
+ if self.board[r][c][o] == 0:
+ str += " "
+ else:
+ str += "__"
+ else:
+ str += "\n"
+ elif o == "v":
+ if r != self.nb_rows:
+ if self.board[r][c][o] == 0:
+ str += " "
+ else:
+ str += "|"
+ if c != self.nb_cols:
+ str += " "
+ else:
+ str += "\n"
+ return str
diff --git a/python/MCTS.py b/python/MCTS.py
new file mode 100644
index 0000000..a65e2d4
--- /dev/null
+++ b/python/MCTS.py
@@ -0,0 +1,151 @@
+import math
+from copy import deepcopy
+from time import clock
+from random import choice
+
+from GameState import GameState
+
+# Based on https://github.com/DieterBuys/mcts-player/
+
+class GameController(object):
+ def get_next_move(self, state):
+ # when you get a new move, it is assumed that the game is not ended yet
+ assert state.get_moves()
+
+
+class MCTSNode(object):
+ """Monte Carlo Tree Node.
+ Each node encapsulates a particular game state, the moves that
+ are possible from that state and the strategic information accumulated
+ by the tree search as it progressively samples the game space.
+ """
+
+ def __init__(self, state, parent=None, move=None):
+ self.parent = parent
+ self.move = move
+ self.state = state
+
+ self.plays = 0
+ self.score = 0
+
+ self.pending_moves = state.get_moves()
+ self.children = []
+
+ def select_child_ucb(self):
+ # Note that each node's plays count is equal
+ # to the sum of its children's plays
+ def ucb(child):
+ win_ratio = child.score / child.plays \
+ + math.sqrt(2 * math.log(self.plays) / child.plays)
+ return win_ratio
+
+ return max(self.children, key=ucb)
+
+ def expand_move(self, move):
+ self.pending_moves.remove(move) # raises KeyError
+
+ child_state = deepcopy(self.state)
+ child_state.play_move(move)
+
+ child = MCTSNode(state=child_state, parent=self, move=move)
+ self.children.append(child)
+ return child
+
+ def get_score(self, result):
+ # return result
+ if result == 0.5:
+ return result
+
+ if self.state.player == 2:
+ if self.state.next_turn_player == result:
+ return 0.0
+ else:
+ return 1.0
+ else:
+ if self.state.next_turn_player == result:
+ return 1.0
+ else:
+ return 0.0
+
+ if self.state.next_turn_player == result:
+ return 0.0
+ else:
+ return 1.0
+
+ def __repr__(self):
+ s = 'ROOT\n' if self.parent is None else ''
+
+ children_moves = [c.move for c in self.children]
+
+ s += """Score ratio: {score} / {plays}
+Pending moves: {pending_moves}
+Children's moves: {children_moves}
+State:
+{state}\n""".format(children_moves=children_moves, **self.__dict__)
+
+ return s
+
+
+class MCTSGameController(GameController):
+ """Game controller that uses MCTS to determine the next move.
+ This is the class which implements the Monte Carlo Tree Search algorithm.
+ It builds a game tree of MCTSNodes and samples the game space until a set
+ time has elapsed.
+ """
+
+ def select(self):
+ node = self.root_node
+
+ # Descend until we find a node that has pending moves, or is terminal
+ while node.pending_moves == set() and node.children != []:
+ node = node.select_child_ucb()
+
+ return node
+
+ def expand(self, node):
+ assert node.pending_moves != set()
+
+ move = choice(tuple(node.pending_moves))
+ return node.expand_move(move)
+
+ def simulate(self, state, max_iterations=1000):
+ state = deepcopy(state)
+
+ move = state.get_random_move()
+ while move is not None:
+ state.play_move(move)
+ move = state.get_random_move()
+
+ max_iterations -= 1
+ if max_iterations <= 0:
+ return 0.5 # raise exception? (game too deep to simulate)
+
+ return state.game_result
+
+ def update(self, node, result):
+ while node is not None:
+ node.plays += 1
+ node.score += node.get_score(result)
+ node = node.parent
+
+ def get_next_move(self, state, time_allowed=1.0):
+ super(MCTSGameController, self).get_next_move(state)
+
+ # Create new tree (TODO: Preserve some state for better performance?)
+ self.root_node = MCTSNode(state)
+ iterations = 0
+
+ start_time = clock()
+ while clock() < start_time + time_allowed:
+ node = self.select()
+
+ if node.pending_moves != set():
+ node = self.expand(node)
+
+ result = self.simulate(node.state)
+ self.update(node, result)
+
+ iterations += 1
+
+ # Return most visited node's move
+ return max(self.root_node.children, key=lambda n: n.plays).move
diff --git a/python/agent.py b/python/agent.py
new file mode 100644
index 0000000..49bc1cc
--- /dev/null
+++ b/python/agent.py
@@ -0,0 +1,55 @@
+import dotsandboxes.dotsandboxesagent as dba
+
+import sys
+import argparse
+import logging
+from GameState import GameState, DotsAndBoxesState
+from MCTS import MCTSNode, MCTSGameController
+
+
+logger = logging.getLogger(__name__)
+games = {}
+agentclass = None
+
+
+class Agent(dba.DotsAndBoxesAgent):
+ def __init__(self, player, nb_rows, nb_cols, timelimit):
+ super(Agent, self).__init__(player, nb_rows, nb_cols, timelimit)
+ self.GameStateClass = DotsAndBoxesState
+ self.game_state = self.GameStateClass(nb_rows, nb_cols, player)
+ self.controller = MCTSGameController()
+
+ def register_action(self, row, column, orientation, player):
+ super(Agent, self).register_action(row, column, orientation, player)
+ # adjust agent specific board representation
+ move = (row, column, orientation)
+ self.game_state.play_move(move)
+
+ def next_action(self):
+ r, c, o = self.controller.get_next_move(self.game_state, time_allowed=self.timelimit)
+ return r, c, o
+
+ def end_game(self):
+ super(Agent, self).end_game()
+
+
+# Adapted from provided code
+def main(argv=None):
+ global agentclass
+ parser = argparse.ArgumentParser(description='Start agent to play Dots and Boxes')
+ parser.add_argument('--verbose', '-v', action='count', default=0, help='Verbose output')
+ parser.add_argument('--quiet', '-q', action='count', default=0, help='Quiet output')
+ parser.add_argument('port', metavar='PORT', type=int, help='Port to use for server')
+ args = parser.parse_args(argv)
+
+ logger.setLevel(max(logging.INFO - 10 * (args.verbose - args.quiet), logging.DEBUG))
+ logger.addHandler(logging.StreamHandler(sys.stdout))
+
+ agentclass = Agent
+ dba.agentclass = Agent
+ dba.start_server(args.port)
+
+
+if __name__ == "__main__":
+ sys.exit(main())
+
diff --git a/python/alphaBeta.py b/python/alphaBeta.py
new file mode 100644
index 0000000..01f82cf
--- /dev/null
+++ b/python/alphaBeta.py
@@ -0,0 +1,30 @@
+
+def alpha_beta(node, alpha, beta):
+
+ # Based on https://en.wikipedia.org/wiki/Alpha%E2%80%93beta_pruning#Pseudocode
+ # node needs to support four operations: isTerminal(), value(), getChildren(), maximizingPlayer()
+
+ if node.isTerminal():
+ return node.value()
+
+ if node.maximizingPlayer():
+
+ v = float("-inf")
+ for child in node.getChildren():
+
+ v = max(v, alpha_beta(child, alpha, beta))
+ alpha = max(alpha, v)
+ if beta <= alpha:
+ break
+
+ else:
+
+ v = float("inf")
+ for child in node.getChildren():
+
+ v = min(v, alpha_beta(child, alpha, beta))
+ beta = min(beta, v)
+ if beta <= alpha:
+ break
+
+ return v
diff --git a/python/ann.py b/python/ann.py
new file mode 100644
index 0000000..05ae647
--- /dev/null
+++ b/python/ann.py
@@ -0,0 +1,170 @@
+from numpy import *
+from math import sqrt
+from copy import deepcopy
+from time import time
+
+class ANN:
+
+ """ANN with one hidden layer, one output and full connections in between consecutive layers.
+ Initial weights are chosen from a normal distribution.
+ Activation function is tanh."""
+
+ INIT_SIGMA = 0.02
+ REL_STOP_MARGIN = 0.01
+ MAX_ITERATIONS = 1000000
+ ACTIVATION = tanh
+ D_ACTIVATION = lambda x: 1 - tanh(x)**2 # Derivative of tanh
+ VEC_ACTIVATION = vectorize(ACTIVATION)
+ VEC_D_ACTIVATION = vectorize(D_ACTIVATION)
+ STEP_SIZE = 0.1
+
+ def __init__(self, input_size, hidden_size):
+
+ #self.input_size = input_size
+ #self.hidden_size = hidden_size
+ self.hidden_weights = random.normal(0, ANN.INIT_SIGMA, (hidden_size, input_size))
+ self.output_weights = random.normal(0, ANN.INIT_SIGMA, hidden_size)
+
+ def get_weights(self):
+ return self.hidden_weights, self.output_weights
+
+ def predict(self, input_vector):
+
+ # Predicts the output for this input vector
+ # input_vector will be normalized
+
+ input_vector = input_vector/linalg.norm(input_vector)
+ return ANN.ACTIVATION(dot(self.output_weights, ANN.VEC_ACTIVATION(dot(self.hidden_weights, input_vector))))
+
+ @staticmethod
+ def frob_norm(a, b):
+
+ # Calculates the total Frobenius norm of both matrices A and B
+ return sqrt(linalg.norm(a)**2 + linalg.norm(b)**2)
+
+ def train(self, examples):
+
+ #print("Training")
+ start = time()
+
+ # examples is a list of (input, output)-tuples
+ # input will be normalized
+ # We stop when the weights have converged within some relative margin
+
+ for example in examples:
+ example[0] = example[0]/linalg.norm(example[0])
+
+ iteration = 0
+ while True:
+
+
+ # Store old weights to check for convergence later
+ prev_hidden_weights = deepcopy(self.hidden_weights)
+ prev_output_weights = deepcopy(self.output_weights)
+
+ for k in range(len(examples)):
+
+ input_vector, output = examples[k]
+
+ # Calculate outputs
+ hidden_input = dot(self.hidden_weights, input_vector)
+ hidden_output = ANN.VEC_ACTIVATION(hidden_input)
+ final_input = dot(self.output_weights, hidden_output)
+ predicted_output = ANN.ACTIVATION(final_input)
+
+ #print("Output:", output)
+ #print("Predicted output:", predicted_output)
+
+ # Used in calculations
+ prediction_error = output - predicted_output
+ output_derivative = ANN.D_ACTIVATION(final_input)
+
+ # Adjust output weights and calculate requested hidden change
+ requested_hidden_change = prediction_error*output_derivative*self.output_weights
+ self.output_weights = self.output_weights + ANN.STEP_SIZE*prediction_error*hidden_output
+
+ #print("After adjusting output weights:", ANN.ACTIVATION(dot(self.output_weights, hidden_output)))
+
+ # Backpropagate requested hidden change to adjust hidden weights
+ self.hidden_weights = self.hidden_weights + ANN.STEP_SIZE*outer(requested_hidden_change*(ANN.VEC_D_ACTIVATION(hidden_input)), input_vector)
+
+ #print("After adjusting hidden weights:", ANN.ACTIVATION(dot(self.output_weights, ANN.VEC_ACTIVATION(dot(self.hidden_weights, input_vector)))))
+
+ # Check stop criteria
+ iteration += 1
+ if iteration >= ANN.MAX_ITERATIONS:
+ break
+
+ # Check stop criteria
+ if iteration >= ANN.MAX_ITERATIONS:
+ break
+ diff = ANN.frob_norm(self.hidden_weights - prev_hidden_weights, self.output_weights - prev_output_weights)
+ base = ANN.frob_norm(self.hidden_weights, self.output_weights)
+ #if base > 0 and diff/base < ANN.REL_STOP_MARGIN:
+ # break
+
+ print(time() - start)
+ print("Stopped training after %s iterations."%iteration)
+
+# TESTING
+
+def print_difference(ann1, ann2):
+
+ # Prints the differences in weights in between two ANN's with identical topology
+
+ hidden_weights1, output_weights1 = ann1.get_weights()
+ hidden_weights2, output_weights2 = ann2.get_weights()
+ hidden_diff = hidden_weights1 - hidden_weights2
+ output_diff = output_weights1 - output_weights2
+
+ print(hidden_diff)
+ print(output_diff)
+ print("Frobenius norms:")
+ print("Hidden weights difference:", linalg.norm(hidden_diff))
+ print("Output weights difference:", linalg.norm(output_diff))
+ print("Both:", ANN.frob_norm(hidden_diff, output_diff))
+
+def RMSE(ann, examples):
+
+ total = 0
+ for input_vector, output in examples:
+ total += (output - ann.predict(input_vector))**2
+ return sqrt(total/len(examples))
+
+def generate_examples(amount, input_size, evaluate):
+ # evaluate is a function mapping an input vector onto a numerical value
+ examples = []
+ inputs = random.normal(0, 100, (amount, input_size))
+ for i in range(amount):
+ input_vector = inputs[i]
+ examples.append([input_vector, evaluate(input_vector)])
+ return examples
+
+def test():
+
+ # Test the ANN by having it model another ANN with identical topology but unknown weights
+
+ input_size = 5
+ hidden_size = 3
+ real = ANN(input_size, hidden_size)
+ model = ANN(input_size, hidden_size)
+
+ # Generate training data
+ training_data = generate_examples(10000, input_size, real.predict)
+ validation_data = generate_examples(10000, input_size, real.predict)
+
+ # Print initial difference, train, then print new difference
+ print("Initial difference:")
+ print_difference(real, model)
+ print("Initial RMSE (on training data):", RMSE(model, training_data))
+ print("Initial RMSE (on validation data):", RMSE(model, validation_data))
+ model.train(training_data)
+ print("After training:")
+ print_difference(real, model)
+ print("After training RMSE (on training data):", RMSE(model, training_data))
+ print("After training RMSE (on validation data):", RMSE(model, validation_data))
+
+if __name__ == "__main__":
+ test()
+
+
diff --git a/python/board.py b/python/board.py
new file mode 100644
index 0000000..36cfe8c
--- /dev/null
+++ b/python/board.py
@@ -0,0 +1,110 @@
+from GameState import GameState
+
+class Board(GameState):
+
+ # Interface methods
+
+ def __init__(self, nb_rows, nb_cols, player):
+
+ # Basic initialization
+ self.nb_rows = nb_rows
+ self.nb_cols = nb_cols
+ self.player = player # 1 or 2
+ self.scores = [0, 0, 0]
+ self.max_score = nb_rows*nb_cols
+
+ # Construct edges and nodes matrix
+ self.edges = [] # Boolean, true means uncut
+ self.nodes = [] # Represents valence of nodes from strings-and-coins
+ for x in range(self.nb_cols):
+ self.edges.append([True]*self.nb_rows)
+ self.edges.append([True]*(self.nb_rows + 1))
+ self.nodes.append([4]*self.nb_rows)
+ self.edges.append([True]*self.nb_rows)
+
+ # Construct all possible moves
+ self.moves_left = set() # Moves are represented as ints
+ for x in range(len(self.edges)):
+ for y in range(len(self.edges[x])):
+ self.moves_left.add(self.coords_to_edge(x, y))
+
+ # TODO: chain updating
+ # Initialize chains
+ # Chains are represented as lists of nodes
+ self.closed_chains = [] # Chains which start and end in nodes of valence
+
+ @property
+ def game_result(self):
+
+ own_score = self.scores[self.player]
+ opponent_score = self.scores[self.player % 2 + 1]
+ if len(self.moves_left) == 0:
+ diff = own_score - opponent_score
+ if diff > 0:
+ return 1
+ elif diff < 0:
+ return 0
+ else:
+ return 0.5
+ else:
+ # Check if one player already has at least half of all points
+ if own_score > self.max_score//2:
+ return 1
+ elif opponent_score > self.max_score//2:
+ return 0
+ else:
+ return None
+
+ def get_moves(self):
+ return self.moves_left
+
+ def get_random_move(self):
+ moves = self.get_moves()
+ return choice(tuple(moves)) if moves != set() else None
+
+ def play_move(self, move):
+ x, y = self.edge_to_coords(move)
+ self.edges[x][y] = False
+ self.moves_left.remove(move)
+
+ # Update valence
+ if x%2 == 0:
+ # Horizontal edge, decrease valence of left and right nodes
+ for node_x in x//2 - 1, x//2:
+ if node_x >= 0 and node_x < nb_cols:
+ self.nodes[node_x][y] -= 1
+ else:
+ # Vertical edge, decrease valence of top and bottom nodes
+ for node_y in y - 1, y:
+ if node_y >= 0 and node_y < nb_rows:
+ self.nodes[x//2][node_y] -= 1
+
+ # TODO: chain updating
+
+ # Own methods
+
+ def undo_move(self, move):
+ x, y = self.edge_to_coords(move)
+ self.edges[x][y] = True
+ self.moves_left.add(move)
+
+ # Update valence
+ if x%2 == 0:
+ # Horizontal edge, increase valence of left and right nodes
+ for node_x in x//2 - 1, x//2:
+ if node_x >= 0 and node_x < nb_cols:
+ self.nodes[node_x][y] += 1
+ else:
+ # Vertical edge, increase valence of top and bottom nodes
+ for node_y in y - 1, y:
+ if node_y >= 0 and node_y < nb_rows:
+ self.nodes[x//2][node_y] += 1
+
+ # TODO: chain updating
+
+ def coords_to_edge(self, x, y):
+ return x*(self.nb_cols + 1) + y
+
+ def edge_to_coords(self, move):
+ return move//(self.nb_cols + 1), move%(self.nb_cols + 1)
+
diff --git a/python/dataStructures.py b/python/dataStructures.py
new file mode 100644
index 0000000..1e972fc
--- /dev/null
+++ b/python/dataStructures.py
@@ -0,0 +1,32 @@
+class DisjointSet:
+ def __init__(self, nbElements):
+ self.parent = []
+ self.rank = []
+ for i in range(0, nbElements):
+ self.parent[i] = i
+ self.rank[i] = 0
+
+ def find(self, x):
+ if self.parent[x] != x:
+ self.parent[x] = self.find(self.parent[x])
+ return self.parent[x]
+
+ def union(self, x, y):
+ xRoot = self.find(x)
+ yRoot = self.find(y)
+
+ # x and y already belong to the same set
+ if xRoot == yRoot:
+ return
+
+ # merge the set of x and y
+ if self.rank[xRoot] < self.rank[yRoot]:
+ self.parent[xRoot] = yRoot
+ elif self.rank[xRoot] > self.rank[yRoot]:
+ self.parent[yRoot] = xRoot
+ else:
+ self.parent[xRoot] = yRoot
+ self.rank[yRoot] += 1
+
+ def inSameSet(self, x, y):
+ return self.find(x) == self.find(y)
\ No newline at end of file
diff --git a/python/dotsandboxes/README.md b/python/dotsandboxes/README.md
new file mode 100644
index 0000000..e3f844c
--- /dev/null
+++ b/python/dotsandboxes/README.md
@@ -0,0 +1,134 @@
+Dots and Boxes application
+==========================
+
+Live demo: https://people.cs.kuleuven.be/wannes.meert/dotsandboxes/play
+
+![Screenshot of Dots and Boxes](https://people.cs.kuleuven.be/wannes.meert/dotsandboxes/screenshot.png?v=2)
+
+This setup is part of the course "Machine Learning: Project" (KU Leuven,
+Faculty of Engineering, Department of Computer Science,
+[DTAI research group](https://dtai.cs.kuleuven.be)).
+
+
+Installation
+------------
+
+The example agent is designed for Python 3.6 and requires the
+[websockets](https://websockets.readthedocs.io) package. Dependencies can be
+installed using pip:
+
+ $ pip install -r requirements.txt
+
+
+Start the game GUI
+------------------
+
+This program shows a web-based GUI to play the Dots and Boxes
+game. This supports human-human, agent-human and agent-agent combinations.
+It is a simple Javascript based application that runs entirely in the browser.
+You can start it by opening the file `static/dotsandboxes.html` in a browser.
+Alternatively, you can start the app using the included simple server:
+
+ $ ./dotsandboxesserver.py 8080
+
+The game can then be played by directing your browser to http://127.0.0.1:8080.
+
+
+Start the agent client
+----------------------
+
+This is the program that runs a game-playing agent. This application listens
+to [websocket](https://developer.mozilla.org/en-US/docs/Web/API/WebSockets_API)
+requests that communicate game information and sends back the next action it
+wants to play.
+
+Starting the agent client is done using the following command:
+
+ $ ./dotsandboxesagent <port>
+
+This starts a websocket on the given port that can receive JSON messages.
+
+The JSON messages given below should be handled by your agent.
+Take into account the maximum time allowed to reply.
+
+### Initiate the game
+
+Both players get a message that a new game has started:
+
+ {
+ "type": "start",
+ "player": 1,
+ "timelimit", 0.5,
+ "grid": [5, 5],
+ "game": "123456"
+ }
+
+where `player` is the number assigned to this agent, `timelimit` is the
+time in seconds in which you need to send your action back to the server,
+and `grid` is the grid size in rows and columns.
+
+If you are player 1, reply with the first action you want to perform:
+
+ {
+ "type": "action",
+ "location": [1, 1],
+ "orientation": "v"
+ }
+
+The field `location` is expressed as row and column (zero-based numbering) and
+`orientation` is either "v" (vertical) or "h" (horizontal).
+
+
+### Action in the game
+
+When an action is played, the message sent to both players is:
+
+ {
+ "type": "action",
+ "game": "123456",
+ "player": 1,
+ "nextplayer": 2,
+ "score": [0, 0],
+ "location": [1, 1],
+ "orientation": "v"
+ }
+
+
+If it is your turn you should answer with a message that states your next
+move:
+
+ {
+ "type": "action",
+ "location": [1, 1],
+ "orientation": "v"
+ }
+
+
+### Game end
+
+When the game ends after an action, the message is slightly altered:
+
+ {
+ "type": "end",
+ "game": "123456",
+ "player": 1,
+ "nextplayer": 0,
+ "score": [3, 1],
+ "location": [1, 1],
+ "orientation": "v",
+ "winner": 1
+ }
+
+The `type` field becomes `end` and a new field `winner` is set to the player
+that has won the game.
+
+
+Contact information
+-------------------
+
+- Wannes Meert, https://people.cs.kuleuven.be/wannes.meert
+- Hendrik Blockeel, https://people.cs.kuleuven.be/hendrik.blockeel
+- Arne De Brabandere, https://people.cs.kuleuven.be/arne.debrabandere
+- Sebastijan Dumančić, https://people.cs.kuleuven.be/sebastijan.dumancic
+- Pieter Robberechts, https://people.cs.kuleuven.be/pieter.robberechts
+
diff --git a/python/dotsandboxes/dotsandboxesagent b/python/dotsandboxes/dotsandboxesagent
new file mode 100755
index 0000000..eecf719
--- /dev/null
+++ b/python/dotsandboxes/dotsandboxesagent
@@ -0,0 +1,5 @@
+#!/bin/bash
+# It is not necessary to use a shell script for this. Dropping the .py
+# extension and including the correct shebang is also correct.
+python3 $(dirname "$0")/dotsandboxesagent.py $@
+
diff --git a/python/dotsandboxes/dotsandboxesagent.py b/python/dotsandboxes/dotsandboxesagent.py
new file mode 100644
index 0000000..c8bc05e
--- /dev/null
+++ b/python/dotsandboxes/dotsandboxesagent.py
@@ -0,0 +1,213 @@
+#!/usr/bin/env python3
+# encoding: utf-8
+"""
+dotsandboxesagent.py
+
+Template for the Machine Learning Project course at KU Leuven (2017-2018)
+of Hendrik Blockeel and Wannes Meert.
+
+Copyright (c) 2018 KU Leuven. All rights reserved.
+"""
+import sys
+import argparse
+import logging
+import asyncio
+import websockets
+import json
+from collections import defaultdict
+import random
+
+
+logger = logging.getLogger(__name__)
+games = {}
+agentclass = None
+
+
+class DotsAndBoxesAgent:
+ """Example Dots and Boxes agent implementation base class.
+ It returns a random next move.
+
+ A DotsAndBoxesAgent object should implement the following methods:
+ - __init__
+ - add_player
+ - register_action
+ - next_action
+ - end_game
+
+ This class does not necessarily use the best data structures for the
+ approach you want to use.
+ """
+ def __init__(self, player, nb_rows, nb_cols, timelimit):
+ """Create Dots and Boxes agent.
+
+ :param player: Player number, 1 or 2
+ :param nb_rows: Rows in grid
+ :param nb_cols: Columns in grid
+ :param timelimit: Maximum time allowed to send a next action.
+ """
+ self.player = {player}
+ self.timelimit = timelimit
+ self.ended = False
+ self.nb_rows = nb_rows
+ self.nb_cols = nb_cols
+ rows = []
+ for ri in range(nb_rows + 1):
+ columns = []
+ for ci in range(nb_cols + 1):
+ columns.append({"v": 0, "h": 0})
+ rows.append(columns)
+ self.cells = rows
+
+ def add_player(self, player):
+ """Use the same agent for multiple players."""
+ self.player.add(player)
+
+ def register_action(self, row, column, orientation, player):
+ """Register action played in game.
+
+ :param row:
+ :param column:
+ :param orientation: "v" or "h"
+ :param player: 1 or 2
+ """
+ self.cells[row][column][orientation] = player
+
+ def next_action(self):
+ """Return the next action this agent wants to perform.
+
+ In this example, the function implements a random move. Replace this
+ function with your own approach.
+
+ :return: (row, column, orientation)
+ """
+ logger.info("Computing next move (grid={}x{}, player={})"\
+ .format(self.nb_rows, self.nb_cols, self.player))
+ # Random move
+ free_lines = []
+ for ri in range(len(self.cells)):
+ row = self.cells[ri]
+ for ci in range(len(row)):
+ cell = row[ci]
+ if ri < (len(self.cells) - 1) and cell["v"] == 0:
+ free_lines.append((ri, ci, "v"))
+ if ci < (len(row) - 1) and cell["h"] == 0:
+ free_lines.append((ri, ci, "h"))
+ if len(free_lines) == 0:
+ # Board full
+ return None
+ movei = random.randint(0, len(free_lines) - 1)
+ r, c, o = free_lines[movei]
+ return r, c, o
+
+ def end_game(self):
+ self.ended = True
+
+
+## MAIN EVENT LOOP
+
+async def handler(websocket, path):
+ logger.info("Start listening")
+ game = None
+ # msg = await websocket.recv()
+ try:
+ async for msg in websocket:
+ logger.info("< {}".format(msg))
+ try:
+ msg = json.loads(msg)
+ except json.decoder.JSONDecodeError as err:
+ logger.error(err)
+ return False
+ game = msg["game"]
+ answer = None
+ if msg["type"] == "start":
+ # Initialize game
+ if msg["game"] in games:
+ games[msg["game"]].add_player(msg["player"])
+ else:
+ nb_rows, nb_cols = msg["grid"]
+ games[msg["game"]] = agentclass(msg["player"],
+ nb_rows,
+ nb_cols,
+ msg["timelimit"])
+ if msg["player"] == 1:
+ # Start the game
+ nm = games[game].next_action()
+ print('nm = {}'.format(nm))
+ if nm is None:
+ # Game over
+ logger.info("Game over")
+ continue
+ r, c, o = nm
+ answer = {
+ 'type': 'action',
+ 'location': [r, c],
+ 'orientation': o
+ }
+ else:
+ # Wait for the opponent
+ answer = None
+
+ elif msg["type"] == "action":
+ # An action has been played
+ r, c = msg["location"]
+ o = msg["orientation"]
+ games[game].register_action(r, c, o, msg["player"])
+ if msg["nextplayer"] in games[game].player:
+ # Compute your move
+ nm = games[game].next_action()
+ if nm is None:
+ # Game over
+ logger.info("Game over")
+ continue
+ nr, nc, no = nm
+ answer = {
+ 'type': 'action',
+ 'location': [nr, nc],
+ 'orientation': no
+ }
+ else:
+ answer = None
+
+ elif msg["type"] == "end":
+ # End the game
+ games[msg["game"]].end_game()
+ answer = None
+ else:
+ logger.error("Unknown message type:\n{}".format(msg))
+
+ if answer is not None:
+ print(answer)
+ await websocket.send(json.dumps(answer))
+ logger.info("> {}".format(answer))
+ except websockets.exceptions.ConnectionClosed as err:
+ logger.info("Connection closed")
+ logger.info("Exit handler")
+
+
+def start_server(port):
+ server = websockets.serve(handler, 'localhost', port)
+ print("Running on ws://127.0.0.1:{}".format(port))
+ asyncio.get_event_loop().run_until_complete(server)
+ asyncio.get_event_loop().run_forever()
+
+
+## COMMAND LINE INTERFACE
+
+def main(argv=None):
+ global agentclass
+ parser = argparse.ArgumentParser(description='Start agent to play Dots and Boxes')
+ parser.add_argument('--verbose', '-v', action='count', default=0, help='Verbose output')
+ parser.add_argument('--quiet', '-q', action='count', default=0, help='Quiet output')
+ parser.add_argument('port', metavar='PORT', type=int, help='Port to use for server')
+ args = parser.parse_args(argv)
+
+ logger.setLevel(max(logging.INFO - 10 * (args.verbose - args.quiet), logging.DEBUG))
+ logger.addHandler(logging.StreamHandler(sys.stdout))
+
+ agentclass = DotsAndBoxesAgent
+ start_server(args.port)
+
+
+if __name__ == "__main__":
+ sys.exit(main())
+
diff --git a/python/dotsandboxes/dotsandboxescompete.py b/python/dotsandboxes/dotsandboxescompete.py
new file mode 100644
index 0000000..ee2aee8
--- /dev/null
+++ b/python/dotsandboxes/dotsandboxescompete.py
@@ -0,0 +1,212 @@
+#!/usr/bin/env python3
+# encoding: utf-8
+"""
+dotsandboxescompete.py
+
+Template for the Machine Learning Project course at KU Leuven (2017-2018)
+of Hendrik Blockeel and Wannes Meert.
+
+Copyright (c) 2018 KU Leuven. All rights reserved.
+"""
+
+import sys
+import argparse
+import logging
+import asyncio
+import websockets
+import json
+from collections import defaultdict
+import random
+import uuid
+import time
+
+logger = logging.getLogger(__name__)
+
+
+def start_competition(address1, address2, nb_rows, nb_cols, timelimit):
+ asyncio.get_event_loop().run_until_complete(connect_agent(address1, address2, nb_rows, nb_cols, timelimit))
+
+
+async def connect_agent(uri1, uri2, nb_rows, nb_cols, timelimit):
+ cur_game = str(uuid.uuid4())
+ winner = None
+ cells = []
+ cur_player = 1
+ points = [0, 0, 0]
+ timings = [None, [], []]
+
+ for ri in range(nb_rows + 1):
+ columns = []
+ for ci in range(nb_cols + 1):
+ columns.append({"v":0, "h":0, "p":0})
+ cells.append(columns)
+
+ logger.info("Connecting to {}".format(uri1))
+ async with websockets.connect(uri1) as websocket1:
+ logger.info("Connecting to {}".format(uri2))
+ async with websockets.connect(uri2) as websocket2:
+ logger.info("Connected")
+
+ # Start game
+ msg = {
+ "type": "start",
+ "player": 1,
+ "timelimit": timelimit,
+ "game": cur_game,
+ "grid": [nb_rows, nb_cols]
+ }
+ await websocket1.send(json.dumps(msg))
+ msg["player"] = 2
+ await websocket2.send(json.dumps(msg))
+
+ # Run game
+ while winner is None:
+ ask_time = time.time()
+ logger.info("Waiting for player {}".format(cur_player))
+ if cur_player == 1:
+ msg = await websocket1.recv()
+ else:
+ msg = await websocket2.recv()
+ recv_time = time.time()
+ diff_time = recv_time - ask_time
+ timings[cur_player].append(diff_time)
+ logger.info("Message received after (s): {}".format(diff_time))
+ try:
+ msg = json.loads(msg)
+ except json.decoder.JSONDecodeError as err:
+ logger.debug(err)
+ continue
+ if msg["type"] != "action":
+ logger.error("Unknown message: {}".format(msg))
+ continue
+ r, c = msg["location"]
+ o = msg["orientation"]
+ next_player = user_action(r, c, o, cur_player,
+ cells, points,
+ nb_rows, nb_cols)
+ if points[1] + points[2] == nb_cols * nb_rows:
+ # Game over
+ winner = 1
+ if points[2] == points[1]:
+ winner = 0
+ if points[2] > points[1]:
+ winner = 2
+ else:
+ msg = {
+ "type": "action",
+ "game": cur_game,
+ "player": cur_player,
+ "nextplayer": next_player,
+ "score": [points[1], points[2]],
+ "location": [r, c],
+ "orientation": o
+ }
+ await websocket1.send(json.dumps(msg))
+ await websocket2.send(json.dumps(msg))
+
+ cur_player = next_player
+
+ # End game
+ logger.info("Game ended: points1={} - points2={} - winner={}".format(points[1], points[2], winner))
+ msg = {
+ "type": "end",
+ "game": cur_game,
+ "player": cur_player,
+ "nextplayer": 0,
+ "score": [points[1], points[2]],
+ "location": [r, c],
+ "orientation": o,
+ "winner": winner
+ }
+ await websocket1.send(json.dumps(msg))
+ await websocket2.send(json.dumps(msg))
+
+ # Timings
+ for i in [1, 2]:
+ logger.info("Timings: player={} - avg={} - min={} - max={}"\
+ .format(i,
+ sum(timings[i])/len(timings[i]),
+ min(timings[i]),
+ max(timings[i])))
+
+ logger.info("Closed connections")
+
+
+def user_action(r, c, o, cur_player, cells, points, nb_rows, nb_cols):
+ logger.info("User action: player={} - r={} - c={} - o={}".format(cur_player, r, c, o))
+ next_player = cur_player
+ won_cell = False
+ cell = cells[r][c]
+ if o == "h":
+ if cell["h"] != 0:
+ return cur_player
+ cell["h"] = cur_player
+ # Above
+ if r > 0:
+ if cells[r - 1][c]["v"] != 0 \
+ and cells[r - 1][c + 1]["v"] != 0 \
+ and cells[r - 1][c]["h"] != 0 \
+ and cells[r][c]["h"] != 0:
+ won_cell = True
+ points[cur_player] += 1
+ cells[r - 1][c]["p"] = cur_player
+ # Below
+ if r < nb_rows:
+ if cells[r][c]["v"] != 0 \
+ and cells[r][c + 1]["v"] != 0 \
+ and cells[r][c]["h"] != 0 \
+ and cells[r + 1][c]["h"] != 0:
+ won_cell = True
+ points[cur_player] += 1
+ cells[r][c]["p"] = cur_player
+
+ if o == "v":
+ if cell["v"] != 0:
+ return cur_player
+ cell["v"] = cur_player;
+ # Left
+ if c > 0:
+ if cells[r][c - 1]["v"] != 0 \
+ and cells[r][c]["v"] != 0 \
+ and cells[r][c - 1]["h"] != 0 \
+ and cells[r + 1][c - 1]["h"] != 0:
+ won_cell = True
+ points[cur_player] += 1
+ cells[r][c - 1]["p"] = cur_player
+ # Right
+ if c < nb_cols:
+ if cells[r][c]["v"] != 0 \
+ and cells[r][c + 1]["v"] != 0 \
+ and cells[r][c]["h"] != 0 \
+ and cells[r + 1][c]["h"] != 0:
+ won_cell = True
+ points[cur_player] += 1
+ cells[r][c]["p"] = cur_player
+
+ if not won_cell:
+ next_player = 3 - cur_player
+ else:
+ next_player = cur_player
+ print("Update points: player1={} - player2={}".format(points[1], points[2]))
+ return next_player
+
+
+def main(argv=None):
+ parser = argparse.ArgumentParser(description='Start agent to play Dots and Boxes')
+ parser.add_argument('--verbose', '-v', action='count', default=0, help='Verbose output')
+ parser.add_argument('--quiet', '-q', action='count', default=0, help='Quiet output')
+ parser.add_argument('--cols', '-c', type=int, default=2, help='Number of columns')
+ parser.add_argument('--rows', '-r', type=int, default=2, help='Number of rows')
+ parser.add_argument('--timelimit', '-t', type=float, default=0.5, help='Time limit per request in seconds')
+ parser.add_argument('agents', nargs=2, metavar='AGENT', help='Websockets addresses for agents')
+ args = parser.parse_args(argv)
+
+ logger.setLevel(max(logging.INFO - 10 * (args.verbose - args.quiet), logging.DEBUG))
+ logger.addHandler(logging.StreamHandler(sys.stdout))
+
+ start_competition(args.agents[0], args.agents[1], args.rows, args.cols, args.timelimit)
+
+
+if __name__ == "__main__":
+ sys.exit(main())
+
diff --git a/python/dotsandboxes/dotsandboxesserver.py b/python/dotsandboxes/dotsandboxesserver.py
new file mode 100644
index 0000000..1b66372
--- /dev/null
+++ b/python/dotsandboxes/dotsandboxesserver.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python3
+# encoding: utf-8
+"""
+dotsandboxesserver.py
+
+Template for the Machine Learning Project course at KU Leuven (2017-2018)
+of Hendrik Blockeel and Wannes Meert.
+
+Copyright (c) 2018 KU Leuven. All rights reserved.
+"""
+
+import sys
+import argparse
+import logging
+import http.server
+import socketserver
+import json
+
+logger = logging.getLogger(__name__)
+
+
+class RequestHandler(http.server.SimpleHTTPRequestHandler):
+ def do_GET(self):
+ if self.path == "/":
+ self.send_response(302)
+ self.send_header("Location", "static/dotsandboxes.html")
+ self.end_headers()
+ return super().do_GET()
+
+ def do_PUT(self):
+ response = {
+ 'result': 'ok'
+ }
+ self.send_response(200)
+ self.send_header('Content-type', 'application/json')
+ self.end_headers()
+ self.wfile.write(json.dumps(response).encode())
+
+
+def start_server(port):
+ with socketserver.TCPServer(("", port), RequestHandler) as httpd:
+ print("Running on http://127.0.0.1:{}".format(port))
+ httpd.serve_forever()
+
+
+def main(argv=None):
+ parser = argparse.ArgumentParser(description='Start server to play Dots and Boxes')
+ parser.add_argument('--verbose', '-v', action='count', default=0, help='Verbose output')
+ parser.add_argument('--quiet', '-q', action='count', default=0, help='Quiet output')
+ parser.add_argument('port', metavar='PORT', type=int, help='Port to use for server')
+ args = parser.parse_args(argv)
+
+ logger.setLevel(max(logging.INFO - 10 * (args.verbose - args.quiet), logging.DEBUG))
+ logger.addHandler(logging.StreamHandler(sys.stdout))
+
+ start_server(args.port)
+
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/python/dotsandboxes/requirements.txt b/python/dotsandboxes/requirements.txt
new file mode 100644
index 0000000..14774b4
--- /dev/null
+++ b/python/dotsandboxes/requirements.txt
@@ -0,0 +1 @@
+websockets
diff --git a/python/dotsandboxes/static/dotsandboxes.css b/python/dotsandboxes/static/dotsandboxes.css
new file mode 100644
index 0000000..71b1d3b
--- /dev/null
+++ b/python/dotsandboxes/static/dotsandboxes.css
@@ -0,0 +1,10 @@
+
+.footer {
+ color: #B3B3B3;
+ margin-bottom: 1ex;
+}
+
+.footer a {
+ color: #87A0B3;
+}
+
diff --git a/python/dotsandboxes/static/dotsandboxes.html b/python/dotsandboxes/static/dotsandboxes.html
new file mode 100644
index 0000000..ecbcbb4
--- /dev/null
+++ b/python/dotsandboxes/static/dotsandboxes.html
@@ -0,0 +1,56 @@
+<!DOCTYPE html>
+<html>
+<html lang="en">
+<meta charset="utf-8">
+<meta name="author" content="Wannes Meert">
+<meta name="description" content="Dots-and-Boxes game. Part of the Machine Learning: Project course at KU Leuven (Hendrik Blockeel, Wannes Meert).">
+<meta name="keywords" content="artificial intelligence,AI,machine learning,dots and boxes,KU Leuven">
+<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
+<title>Dots and Boxes</title>
+<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css" integrity="sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm" crossorigin="anonymous">
+<link rel="stylesheet" href="dotsandboxes.css">
+</head>
+<body>
+ <div class="container">
+ <h1>Dots and Boxes</h1>
+ <div class="row">
+ <div class="col-md">
+ <div id="playing-area"></div>
+ </div>
+ <div class="col-md">
+ <div class="form-group">
+ <p>Size of game:</p>
+ <div class="input-group">
+ <div class="input-group-prepend">
+ <span class="input-group-text">Rows and Columns</span>
+ </div>
+ <input type="number" class="form-control" id="nb-rows" value=6>
+ <input type="number" class="form-control" id="nb-cols" value=6>
+ </div>
+ </div>
+ <div class="form-group">
+ <p>Players:</p>
+ <div class="input-group mb-3">
+ <div class="input-group-prepend"><span class="input-group-text" id="basic-addon3">Agent 1</span></div>
+ <input type="text" class="form-control" id="agent1" aria-describedby="basic-addon3">
+ </div>
+ <div class="input-group mb-3">
+ <div class="input-group-prepend"><span class="input-group-text" id="basic-addon3">Agent 2</span></div>
+ <input type="text" class="form-control" id="agent2" aria-describedby="basic-addon3">
+ </div>
+ <p>Fill in the address where an agent can be reached using WebSockets (e.g. ws://127.0.0.1:8089).
+ If a field is empty a human player is assumed.
+ </p>
+ <button type="button" class="btn btn-secondary" id="restart-btn">Restart game</button>
+ </div>
+ </div>
+ </div>
+ <div class="footer">
+ <small>&copy; <a href="https://dtai.cs.kuleuven.be">DTAI Research Group</a>, KU Leuven &mdash; <a href="https://github.com/wannesm/dotsandboxes">Source</a></small>
+ </div>
+ </div>
+ <script src="https://d3js.org/d3.v4.min.js"></script>
+ <script src="dotsandboxes.js"></script>
+</body>
+</html>
+
diff --git a/python/dotsandboxes/static/dotsandboxes.js b/python/dotsandboxes/static/dotsandboxes.js
new file mode 100644
index 0000000..11e9447
--- /dev/null
+++ b/python/dotsandboxes/static/dotsandboxes.js
@@ -0,0 +1,454 @@
+/**
+ * dotsandboxes.js
+ *
+ * Template for the Machine Learning Project course at KU Leuven (2017-2018)
+ * of Hendrik Blockeel and Wannes Meert.
+ *
+ * Copyright (c) 2018 KU Leuven. All rights reserved.
+ **/
+
+function generateGuid() {
+ var result, i, j;
+ result = '';
+ for(j=0; j<32; j++) {
+ if( j == 8 || j == 12|| j == 16|| j == 20)
+ result = result + '-';
+ i = Math.floor(Math.random()*16).toString(16).toUpperCase();
+ result = result + i;
+ }
+ return result;
+}
+
+// GAME LOGIC
+
+var cur_game = generateGuid();
+var cur_player = 1;
+var cur_ended = false;
+var points = [0, 0, 0];
+var timelimit = 0.5;
+var nb_cols = 6;
+var nb_rows = 6;
+var data = new Array(0);
+
+function restart_game() {
+ //console.log("Restarting game");
+ cur_game = generateGuid();
+ nb_cols = parseInt(document.getElementById('nb-cols').value);
+ if (nb_cols == "" || isNaN(nb_cols)) {
+ nb_cols = 6;
+ }
+ nb_rows = parseInt(document.getElementById('nb-rows').value);
+ if (nb_rows == "" || isNaN(nb_rows)) {
+ nb_rows = 6;
+ }
+ cur_ended = false;
+ console.log("Starting game", cur_game);
+ points = [0, 0, 0];
+ cur_player = 1;
+ var old_length = 0;
+ for (var ri=0; ri<nb_rows + 1; ri++) {
+ if (ri >= data.length) {
+ data.push(new Array(0));
+ }
+ var row = data[ri];
+ for (var ci=0; ci<nb_cols + 1; ci++) {
+ if (ci >= row.length) {
+ row.push({l:0, t:0, p:0, r:0, c:0});
+ }
+ var l = 0;
+ var t = 0;
+ var p = 0;
+ if (ri == nb_rows) {
+ l = undefined;
+ p = undefined;
+ }
+ if (ci == nb_cols) {
+ t = undefined;
+ p = undefined
+ }
+ var cell = row[ci];
+ cell.l = l;
+ cell.t = t;
+ cell.p = p;
+ cell.r = ri;
+ cell.c = ci;
+ }
+ old_length = row.length;
+ for (var ci=nb_cols + 1; ci<old_length; ci++) {
+ row.pop();
+ }
+ }
+ old_length = data.length;
+ for (var ri=nb_rows + 1; ri<old_length; ri++) {
+ data.pop();
+ }
+}
+
+function user_click(cell, o) {
+ if (cur_ended) {
+ //console.log('Game ended, ignoring click');
+ return;
+ }
+ console.log('User click', cell, o);
+ var won_cell = false;
+ var c = cell.c;
+ var r = cell.r;
+ var msg = {
+ type: "action",
+ game: cur_game,
+ player: cur_player,
+ nextplayer: cur_player,
+ score: [points[1], points[2]],
+ location: [r, c],
+ orientation: o
+ };
+ if (o == "h") {
+ if (cell.t != 0) {
+ return;
+ }
+ cell.t = cur_player;
+ // Above
+ if (r > 0) {
+ if (data[r - 1][c].l != 0
+ && data[r - 1][c + 1].l != 0
+ && data[r - 1][c].t != 0
+ && data[r][c].t != 0) {
+ won_cell = true;
+ points[cur_player] += 1;
+ data[r - 1][c].p = cur_player;
+ }
+ }
+ // Below
+ if (r < nb_rows) {
+ if (data[r][c].l != 0
+ && data[r][c + 1].l != 0
+ && data[r][c].t != 0
+ && data[r + 1][c].t != 0) {
+ won_cell = true;
+ points[cur_player] += 1;
+ data[r][c].p = cur_player;
+ }
+ }
+ }
+
+ if (o == "v") {
+ if (cell.l != 0) {
+ return;
+ }
+ cell.l = cur_player;
+ // Left
+ if (c > 0) {
+ if (data[r][c - 1].l != 0
+ && data[r][c].l != 0
+ && data[r][c - 1].t != 0
+ && data[r + 1][c - 1].t != 0) {
+ won_cell = true;
+ points[cur_player] += 1;
+ data[r][c - 1].p = cur_player;
+ }
+ }
+ // Right
+ if (c < nb_cols) {
+ if (data[r][c].l != 0
+ && data[r][c + 1].l != 0
+ && data[r][c].t != 0
+ && data[r + 1][c].t != 0) {
+ won_cell = true;
+ points[cur_player] += 1;
+ data[r][c].p = cur_player;
+ }
+ }
+ }
+
+ msg["score"] = [points[1], points[2]];
+
+ if (!won_cell) {
+ cur_player = 3 - cur_player;
+ msg.nextplayer = cur_player;
+ }
+ update_board();
+ if (points[1] + points[2] == nb_cols * nb_rows) {
+ // Game over
+ var winner = 1
+ if (points[2] == points[1]) {
+ winner = 0;
+ }
+ if (points[2] > points[1]) {
+ winner = 2;
+ }
+ cur_ended = true;
+ msg.type = "end";
+ msg.nextplayer = 0;
+ msg.winner = winner;
+ }
+ send_to_agents(msg);
+}
+
+var field_margin = 10;
+var cell_width = 40;
+var cell_margin = 4;
+var player_height = 40;
+var width = 400;
+var height = 600;
+var line_width = 5;
+
+var player_color = [
+ "#E6E6E6",
+ "#FC6666",
+ "#0F80FF"
+];
+
+var svg = d3.select("#playing-area").append("svg")
+ .attr("width", width)
+ .attr("height", height)
+ .append("g")
+ .attr("transform", "translate("+field_margin+","+field_margin+")");
+
+var player = svg.append("g")
+ .attr("class", "player")
+ .attr("transform", "translate(0,10)");
+
+var field = svg.append("g")
+ .attr("class", "field")
+ .attr("transform", "translate(0,"+player_height+")");
+
+
+function update_board() {
+ // PLAYERS - enter & update
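+    // Both score labels are re-bound to cur_player so the active player's label is recolored each turn.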
+ var player_text = player.selectAll("text")
+ .data([cur_player, cur_player]);
+
+ player_text = player_text.enter().append("text")
+ .attr("x", function(c, i) { return i * 100;})
+ .merge(player_text)
+ .text(function(c, i) {return "Player " + (i + 1) + ": "+points[i + 1];})
+ .attr("fill", function(c, i) {
+ if (c == i + 1) {
+ return player_color[c];
+ } else {
+ return player_color[0];
+ }
+ });
+
+ // ROWS - enter & update
+ var rows = field.selectAll(".row")
+ .data(data)
+ .attr("fill", function() {return null;});
+
+ rows.exit().remove();
+
+ rows = rows.enter().append("g")
+ .attr("class", "row")
+ .attr("transform", function(row, i) {return "translate(0," + cell_width * i + ")";})
+ .merge(rows);
+
+ // COLS - enter & update
+ var cols = rows.selectAll(".col")
+ .data(function(col) {return col;});
+
+ cols.exit().remove();
+
+ var cols_enter = cols.enter().append("g")
+ .attr("class", "col")
+        .attr("transform", function(col, ci) {return "translate("+cell_width * ci+",0)";});
+
+ // CELL - enter
+ cols_enter.append("rect")
+ .attr("class", "cell")
+ .attr("rx", cell_margin)
+ .attr("ry", cell_margin)
+ .attr("opacity", 0.25)
+ .attr("x", cell_margin)
+ .attr("y", cell_margin)
+ .attr("width", cell_width - 2*cell_margin)
+ .attr("height", cell_width - 2*cell_margin);
+
+ // HLINE - enter
+ cols_enter.append("line")
+ .attr("class", "hline")
+ .attr("x1", function(cell, ci) {return cell_margin;})
+ .attr("x2", function(cell, ci) {return cell_width - cell_margin;})
+ .attr("y1", 0)
+ .attr("y2", 0)
+ .attr("stroke-linecap", "round")
+ .attr("stroke", function(cell) {return player_color[cell.t];});
+
+ cols_enter.append("path")
+ .attr("d", "M"+cell_margin+",0"+
+ "L"+(cell_width/2)+",-"+(cell_width/3)+
+ "L"+(cell_width-cell_margin)+",0"+
+ "L"+(cell_width/2)+","+(cell_width/3)+"Z")
+ .attr("stroke", "black")
+ .attr("stroke-width", 2)
+ .attr("opacity", "0")
+ .on("click", function(cell) {
+ if (agents[cur_player].active == true) {
+ console.log("Ignoring click, automated agent")
+ } else {
+ user_click(cell, "h");
+ }
+ });
+
+ // VLINE - enter
+ cols_enter.append("line")
+ .attr("class", "vline")
+ .attr("y1", function(cell, ci) {return cell_margin;})
+ .attr("y2", function(cell, ci) {return cell_width - cell_margin;})
+ .attr("x1", 0)
+ .attr("x2", 0)
+ .attr("stroke-linecap", "round")
+ .attr("stroke", function(cell) {return player_color[cell.l];});
+
+ cols_enter.append("path")
+ .attr("d", "M0,"+cell_margin+
+ "L-"+(cell_width/3)+","+(cell_width/2)+
+ "L0,"+(cell_width-cell_margin)+
+ "L"+(cell_width/3)+","+(cell_width/2)+"Z")
+ .attr("stroke", "black")
+ .attr("stroke-width", 2)
+ .attr("opacity", "0")
+ .on("click", function(cell) {
+ if (agents[cur_player].active == true) {
+ console.log("Ignoring click, automated agent");
+ } else {
+ user_click(cell, "v");
+ }
+ });
+
+ cols = cols_enter
+ .merge(cols);
+
+ // HLINE - update
+ cols.selectAll(".hline")
+ .attr("stroke-width", function(cell) {
+ if (typeof(cell.t) == "undefined") {
+ return 0;
+ }
+ return line_width;
+ })
+ .attr("stroke", function(cell) {return player_color[cell.t];});
+
+ // VLINE - update
+ cols.selectAll(".vline")
+ .attr("stroke-width", function(cell, ci) {
+ if (typeof(cell.l) == "undefined") {
+ return 0;
+ }
+ return line_width;
+ })
+ .attr("stroke", function(cell) {return player_color[cell.l];});
+
+ // CELL - update
+ cols.selectAll(".cell")
+ .attr("fill", function(cell) {
+ if (cell.p == undefined) {
+ return "white";
+ }
+ return player_color[cell.p];
+ });
+}
+
+
+// AGENT CONNECTIONS
+
+var agents = [
+ {},
+ {address: undefined, active: false, socket: undefined},
+ {address: undefined, active: false, socket: undefined}
+];
+
+var msg_queue = [];
+
+
+function start_connections() {
+ for (var i=1; i<3; i++) {
+ agents[i] = {address:undefined, active: false, socket: undefined};
+ var address = document.getElementById('agent'+i).value;
+ if (address != "") {
+ //console.log("Starting websocket for agent "+i+" on address "+address);
+ var agent = agents[i];
+ agent.address = address;
+ agent.socket = new WebSocket(address);
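+            // The handler is wrapped in an immediately-invoked function so each socket closes over its own copy of i and agent (var is function-scoped).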
+ agent.socket.onopen = (function (ii, iagent) { return function(event) {
+ console.log("Agent "+ii+" connected")
+ iagent.active = true;
+ iagent.socket.onmessage = function(event) {
+ var msg = JSON.parse(event.data);
+ //console.log("Get msg from agent "+ii, msg);
+ if (msg.type == "action") {
+ if (cur_player == ii) {
+ console.log("Received action from ACTIVE player "+ii, msg);
+ user_click(data[msg.location[0]][msg.location[1]], msg.orientation);
+ } else {
+ console.log("Received action from NON-ACTIVE player "+ii, msg);
+ }
+ }
+ return false;
+ };
+ iagent.socket.onclose = function(event) {
+ console.log("Closing connection to agent "+ii);
+ };
+ iagent.socket.onerror = function(event) {
+ console.log("Error on connection to agent "+ii, event);
+ };
+ msg = {
+ "type": "start",
+ "player": ii,
+ "timelimit": timelimit,
+ "game": cur_game,
+ "grid": [nb_rows, nb_cols]
+ };
+ iagent.socket.send(JSON.stringify(msg));
+ };}(i, agent));
+ }
+ }
+}
+
+
+function send_to_agents(msg) {
+ msg_queue.push(JSON.stringify(msg));
+ try_sending_to_agents();
+}
+
+
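+// Polls every 100 ms until every configured agent has connected, then forwards the next queued message to all active agents.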
+function try_sending_to_agents() {
+ var all_connected = true;
+ for (var i=1; i<3; i++) {
+ if (agents[i].address !== undefined && agents[i].active == false) {
+ all_connected = false;
+ break;
+ }
+ }
+ if (!all_connected) {
+ // Wait until all are connected
+ setTimeout(try_sending_to_agents, 100);
+ } else {
+ if (msg_queue.length == 0 ) {
+ return;
+ }
+ var msg = msg_queue.shift();
+ console.log("Send msg to agents", msg);
+ for (var i=1; i<3; i++) {
+ if (agents[i].active == true) {
+ agents[i].socket.send(msg);
+ }
+ }
+ }
+}
+
+
+// STARTUP
+
+function restart() {
+ restart_game();
+ update_board();
+ start_connections();
+}
+
+var restartbtn = document.getElementById("restart-btn");
+restartbtn.onclick = function() {
+ console.log("Restart game");
+ restart();
+};
+
+restart();
diff --git a/python/evaluate.py b/python/evaluate.py
new file mode 100644
index 0000000..fb60211
--- /dev/null
+++ b/python/evaluate.py
@@ -0,0 +1,240 @@
+#!/usr/bin/env python3
+# encoding: utf-8
+"""
+evaluate.py (adapted from dotsandboxescompete.py)
+
+Template for the Machine Learning Project course at KU Leuven (2017-2018)
+of Hendrik Blockeel and Wannes Meert.
+
+Copyright (c) 2018 KU Leuven. All rights reserved.
+"""
+
+import sys
+import argparse
+import logging
+import asyncio
+import websockets
+import json
+from collections import defaultdict
+import random
+import uuid
+import time
+import csv
+
+logger = logging.getLogger(__name__)
+OUTPUTWRITER = None
+
+
+def start_competition(address1, address2, nb_rows, nb_cols, timelimit):
+ asyncio.get_event_loop().run_until_complete(connect_agent(address1, address2, nb_rows, nb_cols, timelimit))
+
+
+async def connect_agent(uri1, uri2, nb_rows, nb_cols, timelimit):
+ cur_game = str(uuid.uuid4())
+ winner = None
+ cells = []
+ cur_player = 1
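+    # Index 0 of points/timings is padding so that indices 1 and 2 line up with the player numbers.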
+ points = [0, 0, 0]
+ timings = [None, [], []]
+
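+    # Each cell records who drew its left edge ("v") and top edge ("h") and who closed the box ("p"); 0 means unclaimed.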
+ for ri in range(nb_rows + 1):
+ columns = []
+ for ci in range(nb_cols + 1):
+ columns.append({"v":0, "h":0, "p":0})
+ cells.append(columns)
+
+ logger.info("Connecting to {}".format(uri1))
+ async with websockets.connect(uri1) as websocket1:
+ logger.info("Connecting to {}".format(uri2))
+ async with websockets.connect(uri2) as websocket2:
+ logger.info("Connected")
+
+ # Start game
+ msg = {
+ "type": "start",
+ "player": 1,
+ "timelimit": timelimit,
+ "game": cur_game,
+ "grid": [nb_rows, nb_cols]
+ }
+ await websocket1.send(json.dumps(msg))
+ msg["player"] = 2
+ await websocket2.send(json.dumps(msg))
+
+ # Run game
+ while winner is None:
+ ask_time = time.time()
+ logger.info("Waiting for player {}".format(cur_player))
+ if cur_player == 1:
+ msg = await websocket1.recv()
+ else:
+ msg = await websocket2.recv()
+ recv_time = time.time()
+ diff_time = recv_time - ask_time
+ timings[cur_player].append(diff_time)
+ logger.info("Message received after (s): {}".format(diff_time))
+ try:
+ msg = json.loads(msg)
+ except json.decoder.JSONDecodeError as err:
+ logger.debug(err)
+ continue
+ if msg["type"] != "action":
+ logger.error("Unknown message: {}".format(msg))
+ continue
+ r, c = msg["location"]
+ o = msg["orientation"]
+ next_player = user_action(r, c, o, cur_player,
+ cells, points,
+ nb_rows, nb_cols)
+ if points[1] + points[2] == nb_cols * nb_rows:
+ # Game over
+ winner = 1
+ if points[2] == points[1]:
+ winner = 0
+ if points[2] > points[1]:
+ winner = 2
+ else:
+ msg = {
+ "type": "action",
+ "game": cur_game,
+ "player": cur_player,
+ "nextplayer": next_player,
+ "score": [points[1], points[2]],
+ "location": [r, c],
+ "orientation": o
+ }
+ await websocket1.send(json.dumps(msg))
+ await websocket2.send(json.dumps(msg))
+
+ cur_player = next_player
+
+ # End game
+ logger.info("Game ended: points1={} - points2={} - winner={}".format(points[1], points[2], winner))
+ msg = {
+ "type": "end",
+ "game": cur_game,
+ "player": cur_player,
+ "nextplayer": 0,
+ "score": [points[1], points[2]],
+ "location": [r, c],
+ "orientation": o,
+ "winner": winner
+ }
+ await websocket1.send(json.dumps(msg))
+ await websocket2.send(json.dumps(msg))
+
+ # Timings
+ for i in [1, 2]:
+ logger.info("Timings: player={} - avg={} - min={} - max={}"\
+ .format(i,
+ sum(timings[i])/len(timings[i]),
+ min(timings[i]),
+ max(timings[i])))
+
+ logger.info("Closed connections")
+ OUTPUTWRITER.writeln({'score1': points[1], 'score2': points[2], 'winner': winner})
+
+
+def user_action(r, c, o, cur_player, cells, points, nb_rows, nb_cols):
+ logger.info("User action: player={} - r={} - c={} - o={}".format(cur_player, r, c, o))
+ next_player = cur_player
+ won_cell = False
+ cell = cells[r][c]
+ if o == "h":
+ if cell["h"] != 0:
+ return cur_player
+ cell["h"] = cur_player
+ # Above
+ if r > 0:
+ if cells[r - 1][c]["v"] != 0 \
+ and cells[r - 1][c + 1]["v"] != 0 \
+ and cells[r - 1][c]["h"] != 0 \
+ and cells[r][c]["h"] != 0:
+ won_cell = True
+ points[cur_player] += 1
+ cells[r - 1][c]["p"] = cur_player
+ # Below
+ if r < nb_rows:
+ if cells[r][c]["v"] != 0 \
+ and cells[r][c + 1]["v"] != 0 \
+ and cells[r][c]["h"] != 0 \
+ and cells[r + 1][c]["h"] != 0:
+ won_cell = True
+ points[cur_player] += 1
+ cells[r][c]["p"] = cur_player
+
+ if o == "v":
+ if cell["v"] != 0:
+ return cur_player
+        cell["v"] = cur_player
+ # Left
+ if c > 0:
+ if cells[r][c - 1]["v"] != 0 \
+ and cells[r][c]["v"] != 0 \
+ and cells[r][c - 1]["h"] != 0 \
+ and cells[r + 1][c - 1]["h"] != 0:
+ won_cell = True
+ points[cur_player] += 1
+ cells[r][c - 1]["p"] = cur_player
+ # Right
+ if c < nb_cols:
+ if cells[r][c]["v"] != 0 \
+ and cells[r][c + 1]["v"] != 0 \
+ and cells[r][c]["h"] != 0 \
+ and cells[r + 1][c]["h"] != 0:
+ won_cell = True
+ points[cur_player] += 1
+ cells[r][c]["p"] = cur_player
+
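+    # Closing a box lets the same player move again; otherwise 3 - cur_player switches turns.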
+ if not won_cell:
+ next_player = 3 - cur_player
+ else:
+ next_player = cur_player
+    logger.info("Update points: player1={} - player2={}".format(points[1], points[2]))
+ return next_player
+
+
+def main(argv=None):
+ parser = argparse.ArgumentParser(description='Start agent to play Dots and Boxes')
+ parser.add_argument('--verbose', '-v', action='count', default=0, help='Verbose output')
+ parser.add_argument('--quiet', '-q', action='count', default=0, help='Quiet output')
+ parser.add_argument('--cols', '-c', type=int, default=2, help='Number of columns')
+ parser.add_argument('--rows', '-r', type=int, default=2, help='Number of rows')
+ parser.add_argument('--timelimit', '-t', type=float, default=0.5, help='Time limit per request in seconds')
+ parser.add_argument('--number', '-n', type=int, default=1, help='Number of games that will be played for the evaluation')
+ parser.add_argument('--output', '-o', default="output.csv", help='File where game results will be written to')
+ parser.add_argument('agents', nargs=2, metavar='AGENT', help='Websockets addresses for agents')
+ args = parser.parse_args(argv)
+
+ logger.setLevel(max(logging.INFO - 10 * (args.verbose - args.quiet), logging.DEBUG))
+ logger.addHandler(logging.StreamHandler(sys.stdout))
+
+ global OUTPUTWRITER
+ OUTPUTWRITER = OutputWriter(args.output)
+
+ for i in range(args.number):
+ start_competition(args.agents[0], args.agents[1], args.rows, args.cols, args.timelimit)
+
+ OUTPUTWRITER.close()
+
+
+class OutputWriter:
+ def __init__(self, outputfile):
+ self.csvfile = open(outputfile, 'w', newline='')
+ try:
+ fieldnames = ['score1', 'score2', 'winner']
+ self.writer = csv.DictWriter(self.csvfile, fieldnames=fieldnames)
+ self.writer.writeheader()
+ except IOError:
+ self.csvfile.close()
+
+ def writeln(self, csvdict):
+ self.writer.writerow(csvdict)
+
+ def close(self):
+ self.csvfile.close()
+
+
+if __name__ == "__main__":
+ sys.exit(main())
+
diff --git a/python/start.sh b/python/start.sh
new file mode 100755
index 0000000..6be69a2
--- /dev/null
+++ b/python/start.sh
@@ -0,0 +1,7 @@
+trap "exit" INT TERM
+trap "kill 0" EXIT
+(cd dotsandboxes; python3 dotsandboxesserver.py 8080) &
+python3 agent.py 10001 &
+python3 agent.py 10002 &
+
+read -p "Press enter to close all programs."
diff --git a/runnable_agent/Agent.jar b/runnable_agent/Agent.jar
new file mode 100644
index 0000000..6b969cc
--- /dev/null
+++ b/runnable_agent/Agent.jar
Binary files differ
diff --git a/runnable_agent/dotsandboxesagent b/runnable_agent/dotsandboxesagent
new file mode 100644
index 0000000..3788e28
--- /dev/null
+++ b/runnable_agent/dotsandboxesagent
@@ -0,0 +1,3 @@
+#!/bin/bash
+cd "$(dirname "$0")"
+java -jar Agent.jar -p "$@" \ No newline at end of file
diff --git a/runnable_agent/final_ann b/runnable_agent/final_ann
new file mode 100644
index 0000000..7be12c1
--- /dev/null
+++ b/runnable_agent/final_ann
Binary files differ
diff --git a/sandbox/game.py b/sandbox/game.py
deleted file mode 100644
index 003d943..0000000
--- a/sandbox/game.py
+++ /dev/null
@@ -1,282 +0,0 @@
-import pygame
-import numpy as np
-import sys
-
-
-class Game:
- def __init__(self):
- self.grid_size = 10 # default
- if len(sys.argv) > 1:
- self.grid_size = int(sys.argv[1])
-
- # It turns out that there are nice structures when setting ~0.75 walls per slot
- self.start_walls = int(0.75 * self.grid_size ** 2)
-
- self.accept_clicks = True
-
- # variables for the boxes for each player (x would be computer)
- self.a_boxes = 0
- self.b_boxes = 0
- self.x_boxes = 0
-
- self.turn = "X"
- self.caption = "'s turn "
-
- # 0 empty 1 is A 2 is B and 3 is X
- self.grid_status = np.zeros((self.grid_size, self.grid_size), np.int)
- self.upper_walls_set_flags = np.zeros((self.grid_size, self.grid_size), np.dtype(bool))
- self.left_walls_set_flags = np.zeros((self.grid_size, self.grid_size), np.dtype(bool))
-
- # set the outer walls
- for column in range(self.grid_size):
- for row in range(self.grid_size):
- if column == 0:
- self.left_walls_set_flags[column][row] = True
- if row == 0:
- self.upper_walls_set_flags[column][row] = True
-
- # initialize pygame
- pygame.init()
-
- # set the display size (one slot has 30x30 pixels; Walls: 4x26 Box: 26x26)
- self.screen = pygame.display.set_mode([30 * self.grid_size + 4, 30 * self.grid_size + 4])
-
- # load all images
- self.empty = pygame.image.load("pics/empty.png")
- self.A = pygame.image.load("pics/A.png")
- self.B = pygame.image.load("pics/B.png")
- self.X = pygame.image.load("pics/X.png")
- self.block = pygame.image.load("pics/block.png")
- self.lineX = pygame.image.load("pics/lineX.png")
- self.lineXempty = pygame.image.load("pics/lineXempty.png")
- self.lineY = pygame.image.load("pics/lineY.png")
- self.lineYempty = pygame.image.load("pics/lineYempty.png")
-
- tries = 0
- # set the start walls randomly but do not create any opportunity to directly close boxes
- while self.start_walls > 0 and tries < 4*self.grid_size**2:
- x = np.random.randint(self.grid_size)
- y = np.random.randint(self.grid_size)
- up = np.random.randint(2)
-
- if up:
- if not self.upper_walls_set_flags[x][y] \
- and self.get_number_of_walls(x, y) < 2 \
- and self.get_number_of_walls(x, y - 1) < 2:
- self.upper_walls_set_flags[x][y] = True
- self.start_walls -= 1
- else:
- if not self.left_walls_set_flags[x][y] \
- and self.get_number_of_walls(x, y) < 2 \
- and self.get_number_of_walls(x - 1, y) < 2:
- self.left_walls_set_flags[x][y] = True
- self.start_walls -= 1
-
- tries += 1
-
- # now it's the first players turn
- self.turn = "A"
- self.show()
-
- while True:
- # go through all events and check the types
- for event in pygame.event.get():
- # quit the game when the player closes it
- if event.type == pygame.QUIT:
- pygame.quit()
- exit(0)
-
- # left click
- elif event.type == pygame.MOUSEBUTTONDOWN and pygame.mouse.get_pressed()[0]:
- if not self.accept_clicks:
- continue
-
- # get the current position of the cursor
- x = pygame.mouse.get_pos()[0]
- y = pygame.mouse.get_pos()[1]
-
- # check whether it was a not set wall that was clicked
- wall_x, wall_y = self.get_wall(x, y)
-
- if not (wall_x >= 0 and wall_y >= 0):
- continue
-
- upper_wall = wall_y % 30 == 0
-
- if upper_wall:
- if not self.upper_walls_set_flags[wall_x//30][wall_y//30]:
- self.upper_walls_set_flags[wall_x//30][wall_y//30] = True
- self.screen.blit(self.lineX, (wall_x, wall_y))
- else:
- continue
- else:
- if not self.left_walls_set_flags[wall_x//30][wall_y//30]:
- self.left_walls_set_flags[wall_x//30][wall_y//30] = True
- self.screen.blit(self.lineY, (wall_x, wall_y))
- else:
- continue
-
- if not self.set_all_slots() > 0:
- if self.turn == "A":
- self.turn = "B"
- elif self.turn == "B":
- self.turn = "A"
-
- if self.won():
- self.accept_clicks = False
-
- else:
-
- # set the display caption
- pygame.display.set_caption(self.turn + self.caption + " A:" + str(
- self.a_boxes) + " B:" + str(self.b_boxes))
-
- # update the players screen
- pygame.display.flip()
-
- def get_number_of_walls(self, slot_column, slot_row):
- """
- Get the number of set walls around the passed slot
- :param slot_column: x of the slot
- :param slot_row: y of the slot
- :return: number of set walls
- """
- number_of_walls = 0
-
- if slot_column == self.grid_size - 1:
- number_of_walls += 1
- elif self.left_walls_set_flags[slot_column + 1][slot_row]:
- number_of_walls += 1
-
- if slot_row == self.grid_size - 1:
- number_of_walls += 1
- elif self.upper_walls_set_flags[slot_column][slot_row + 1]:
- number_of_walls += 1
-
- if self.left_walls_set_flags[slot_column][slot_row]:
- number_of_walls += 1
-
- if self.upper_walls_set_flags[slot_column][slot_row]:
- number_of_walls += 1
-
- return number_of_walls
-
- @staticmethod
- def get_wall(pos_x, pos_y):
- rest_x = pos_x % 30
- rest_y = pos_y % 30
-
- wall_slot_x = pos_x//30
- wall_slot_y = pos_y//30
-
- # in a corner
- if rest_x < 4 and rest_y < 4:
- return -1, -1
-
- if rest_x < 4:
- # is left wall of the slot
- return wall_slot_x*30, wall_slot_y*30 + 4
-
- if rest_y < 4:
- # is upper wall of the slot
- return wall_slot_x*30 + 4, wall_slot_y*30
-
- # inside the box => not a wall
- return -1, -1
-
- def set_all_slots(self):
- """
- Find all newly closed boxes and close them for the current player
- :return: number of closed boxes
- """
- to_return = 0
-
- for column_ in range(self.grid_size):
- for row_ in range(self.grid_size):
- if self.grid_status[column_][row_] != 0 or self.get_number_of_walls(column_, row_) < 4:
- continue
-
- if self.turn == "A":
- self.grid_status[column_][row_] = 1
- self.screen.blit(self.A, (column_ * 30 + 4, row_ * 30 + 4))
- self.a_boxes += 1
- elif self.turn == "B":
- self.grid_status[column_][row_] = 2
- self.screen.blit(self.B, (column_ * 30 + 4, row_ * 30 + 4))
- self.b_boxes += 1
- elif self.turn == "X":
- self.grid_status[column_][row_] = 3
- self.screen.blit(self.X, (column_ * 30 + 4, row_ * 30 + 4))
- self.x_boxes += 1
-
- to_return += 1
-
- return to_return
-
- def won(self):
- """
- Check whether the game was finished
- If so change the caption to display the winner
- :return: won or not
- """
- if self.a_boxes + self.b_boxes + self.x_boxes == self.grid_size ** 2:
- if self.a_boxes < self.b_boxes:
- won_caption = "Player B won! Congrats"
- elif self.b_boxes < self.a_boxes:
- won_caption = "Player A won! Congrats"
- else:
- won_caption = "It's a tie!"
-
- # set the display caption
- pygame.display.set_caption(won_caption)
-
- # update the players screen
- pygame.display.flip()
-
- return True
- else:
- return False
-
- def show(self):
- """
- Reload the screen
- Use the current grid and wall information to
- update the players screen
- """
- self.screen.fill(0)
-
- # loop over all slots
- for column in range(self.grid_size):
- for row in range(self.grid_size):
- x, y = column * 30, row * 30
- self.screen.blit(self.block, (x, y))
- x += 4
- if not self.upper_walls_set_flags[column][row]:
- self.screen.blit(self.lineXempty, (x, y))
- else:
- self.screen.blit(self.lineX, (x, y))
- x -= 4
- y += 4
- if not self.left_walls_set_flags[column][row]:
- self.screen.blit(self.lineYempty, (x, y))
- else:
- self.screen.blit(self.lineY, (x, y))
-
- # calculate x and y in pixels
- x, y = column * 30 + 4, row * 30 + 4
-
- if self.grid_status[column][row] == 0:
- self.screen.blit(self.empty, (x, y))
- elif self.grid_status[column][row] == 1:
- self.screen.blit(self.A, (x, y))
- elif self.grid_status[column][row] == 2:
- self.screen.blit(self.B, (x, y))
- elif self.grid_status[column][row] == 3:
- self.screen.blit(self.X, (x, y))
-
- pygame.display.set_caption(self.turn + self.caption + " A:" + str(self.a_boxes) + " B:" + str(
- self.b_boxes))
- pygame.display.flip()
-
-
-game = Game() # start a game \ No newline at end of file
diff --git a/sandbox/pics/A.png b/sandbox/pics/A.png
deleted file mode 100644
index 7d04d60..0000000
--- a/sandbox/pics/A.png
+++ /dev/null
Binary files differ
diff --git a/sandbox/pics/B.png b/sandbox/pics/B.png
deleted file mode 100644
index c9f6ec7..0000000
--- a/sandbox/pics/B.png
+++ /dev/null
Binary files differ
diff --git a/sandbox/pics/X.png b/sandbox/pics/X.png
deleted file mode 100644
index 64bcf7c..0000000
--- a/sandbox/pics/X.png
+++ /dev/null
Binary files differ
diff --git a/sandbox/pics/block.png b/sandbox/pics/block.png
deleted file mode 100644
index 8c2f9d8..0000000
--- a/sandbox/pics/block.png
+++ /dev/null
Binary files differ
diff --git a/sandbox/pics/empty.png b/sandbox/pics/empty.png
deleted file mode 100644
index 8d3daad..0000000
--- a/sandbox/pics/empty.png
+++ /dev/null
Binary files differ
diff --git a/sandbox/pics/lineX.png b/sandbox/pics/lineX.png
deleted file mode 100644
index cec7bc0..0000000
--- a/sandbox/pics/lineX.png
+++ /dev/null
Binary files differ
diff --git a/sandbox/pics/lineXempty.png b/sandbox/pics/lineXempty.png
deleted file mode 100644
index ff09abe..0000000
--- a/sandbox/pics/lineXempty.png
+++ /dev/null
Binary files differ
diff --git a/sandbox/pics/lineY.png b/sandbox/pics/lineY.png
deleted file mode 100644
index 32fe351..0000000
--- a/sandbox/pics/lineY.png
+++ /dev/null
Binary files differ
diff --git a/sandbox/pics/lineYempty.png b/sandbox/pics/lineYempty.png
deleted file mode 100644
index ef10800..0000000
--- a/sandbox/pics/lineYempty.png
+++ /dev/null
Binary files differ