Added a ChatGPT-generated MiniMaxAI based on the old MiniMaxAI, but with alpha-beta pruning and heuristics for Reversi
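
For context: alpha-beta pruning returns the same move plain minimax would, but stops searching a subtree as soon as `beta <= alpha`, i.e. as soon as that subtree provably cannot change the final choice. A minimal, generic sketch of the idea (hypothetical `Node` type, not this project's API; the project's real implementation is `getMoveScore` in the new MiniMaxAI.java below):

```java
import java.util.List;

public class AlphaBetaSketch {
    // A toy game tree: interior nodes alternate max/min, leaves carry scores.
    record Node(int value, List<Node> children) {
        boolean isLeaf() { return children.isEmpty(); }
    }

    static int search(Node n, boolean maximizing, int alpha, int beta) {
        if (n.isLeaf()) return n.value();
        int best = maximizing ? Integer.MIN_VALUE : Integer.MAX_VALUE;
        for (Node child : n.children()) {
            int score = search(child, !maximizing, alpha, beta);
            if (maximizing) {
                best = Math.max(best, score);
                alpha = Math.max(alpha, best);
            } else {
                best = Math.min(best, score);
                beta = Math.min(beta, best);
            }
            if (beta <= alpha) break; // prune: remaining siblings cannot matter
        }
        return best;
    }

    public static void main(String[] args) {
        // max(min(3, 5), min(2, 9)) = 3; the 9-leaf is never visited, because
        // its min-parent is already bounded below the 3 the maximizer holds.
        Node root = new Node(0, List.of(
                new Node(0, List.of(new Node(3, List.of()), new Node(5, List.of()))),
                new Node(0, List.of(new Node(2, List.of()), new Node(9, List.of())))));
        System.out.println(search(root, true, Integer.MIN_VALUE, Integer.MAX_VALUE)); // 3
    }
}
```

The depths chosen in the diff below follow the games' sizes: depth 9 searches Tic-Tac-Toe exhaustively (a game lasts at most nine plies), while Reversi is cut off at depth 6 and falls back to the heuristic evaluation.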
@@ -3,6 +3,7 @@ package org.toop.app.gameControllers;
import org.toop.app.canvas.TicTacToeBitCanvas;
import org.toop.framework.gameFramework.model.game.threadBehaviour.ThreadBehaviour;
import org.toop.framework.gameFramework.model.player.Player;
import org.toop.game.gameThreads.LocalFixedRateThreadBehaviour;
import org.toop.game.gameThreads.LocalThreadBehaviour;
import org.toop.game.gameThreads.OnlineThreadBehaviour;
import org.toop.game.games.tictactoe.BitboardTicTacToe;

@@ -18,6 +18,7 @@ import org.toop.app.widget.complex.PlayerInfoWidget;
import org.toop.app.widget.complex.ViewWidget;
import org.toop.app.widget.popup.ErrorPopup;
import org.toop.app.widget.tutorial.*;
import org.toop.game.players.MiniMaxAI;
import org.toop.game.players.RandomAI;
import org.toop.local.AppContext;

@@ -27,6 +28,7 @@ import javafx.scene.layout.VBox;
import org.toop.local.AppSettings;

import java.util.Arrays;
import java.util.Random;

public class LocalMultiplayerView extends ViewWidget {
    private final GameInformation information;

@@ -58,7 +60,7 @@ public class LocalMultiplayerView extends ViewWidget {
        if (information.players[0].isHuman) {
            players[0] = new LocalPlayer<>(information.players[0].name);
        } else {
-            players[0] = new ArtificialPlayer<>(new RandomAI<BitboardTicTacToe>(), information.players[0].name);
+            players[0] = new ArtificialPlayer<>(new MiniMaxAI<BitboardTicTacToe>(9), information.players[0].name);
        }
        if (information.players[1].isHuman) {
            players[1] = new LocalPlayer<>(information.players[1].name);

@@ -86,12 +88,12 @@ public class LocalMultiplayerView extends ViewWidget {
        if (information.players[0].isHuman) {
            players[0] = new LocalPlayer<>(information.players[0].name);
        } else {
-            players[0] = new ArtificialPlayer<>(new RandomAI<BitboardReversi>(), information.players[0].name);
+            players[0] = new ArtificialPlayer<>(new RandomAI<BitboardReversi>(), "Random AI");
        }
        if (information.players[1].isHuman) {
            players[1] = new LocalPlayer<>(information.players[1].name);
        } else {
-            players[1] = new ArtificialPlayer<>(new RandomAI<BitboardReversi>(), information.players[1].name);
+            players[1] = new ArtificialPlayer<>(new MiniMaxAI<BitboardReversi>(6), "MiniMax");
        }
        if (AppSettings.getSettings().getTutorialFlag() && AppSettings.getSettings().getFirstReversi()) {
            new ShowEnableTutorialWidget(

@@ -24,7 +24,7 @@ public class LocalFixedRateThreadBehaviour<T extends TurnBasedGame<T>> extends A
     *
     * @param game the game instance
     */
-    public LocalFixedRateThreadBehaviour(T game, Consumer<Long> onSendMove) {
+    public LocalFixedRateThreadBehaviour(T game) {
        super(game);
    }

game/src/main/java/org/toop/game/players/MiniMaxAI.java (new file, 166 lines)
@@ -0,0 +1,166 @@
package org.toop.game.players;

import org.toop.framework.gameFramework.GameState;
import org.toop.framework.gameFramework.model.game.PlayResult;
import org.toop.framework.gameFramework.model.game.TurnBasedGame;
import org.toop.framework.gameFramework.model.player.AbstractAI;

import java.util.ArrayList;
import java.util.List;
import java.util.Random;

public class MiniMaxAI<T extends TurnBasedGame<T>> extends AbstractAI<T> {

    private final int maxDepth;
    private final Random random = new Random();

    public MiniMaxAI(int depth) {
        this.maxDepth = depth;
    }

    public MiniMaxAI(MiniMaxAI<T> other) {
        this.maxDepth = other.maxDepth;
    }

    @Override
    public MiniMaxAI<T> deepCopy() {
        return new MiniMaxAI<>(this);
    }

    @Override
    public long getMove(T game) {
        long legalMoves = game.getLegalMoves();
        if (legalMoves == 0) return 0;

        List<Long> bestMoves = new ArrayList<>();
        int bestScore = Integer.MIN_VALUE;
        int aiPlayer = game.getCurrentTurn();

        long movesLoop = legalMoves;
        while (movesLoop != 0) {
            long move = 1L << Long.numberOfTrailingZeros(movesLoop);
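            // `1L << numberOfTrailingZeros(x)` isolates the lowest set bit of the
            // move mask; `movesLoop &= movesLoop - 1` at the end of the loop clears
            // it, so each legal move bit is visited exactly once.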
            T copy = game.deepCopy();
            PlayResult result = copy.play(move);

            int score;
            switch (result.state()) {
                case WIN -> score = (result.player() == aiPlayer ? maxDepth : -maxDepth);
                case DRAW -> score = 0;
                default -> score = getMoveScore(copy, maxDepth - 1, false, aiPlayer, Integer.MIN_VALUE, Integer.MAX_VALUE);
            }

            if (score > bestScore) {
                bestScore = score;
                bestMoves.clear();
                bestMoves.add(move);
            } else if (score == bestScore) {
                bestMoves.add(move);
            }

            movesLoop &= movesLoop - 1;
        }

        long chosenMove = bestMoves.get(random.nextInt(bestMoves.size()));
        System.out.println("[DEBUG] Selected move: " + Long.toBinaryString(chosenMove) + " | score: " + bestScore);
        return chosenMove;
    }

    /**
     * Recursive minimax with alpha-beta pruning and heuristic evaluation.
     *
     * @param game       Current game state
     * @param depth      Remaining depth
     * @param maximizing True if AI is maximizing, false if opponent
     * @param aiPlayer   AI's player index
     * @param alpha      Alpha value
     * @param beta       Beta value
     * @return score of the position
     */
    private int getMoveScore(T game, int depth, boolean maximizing, int aiPlayer, int alpha, int beta) {
        long legalMoves = game.getLegalMoves();

        // Terminal state
        PlayResult lastResult = null;
        if (legalMoves == 0) {
            lastResult = new PlayResult(GameState.DRAW, -1);
        }

        // If the game is over or depth limit reached, evaluate
        if (depth <= 0 || legalMoves == 0) {
            if (lastResult != null) return 0;
            return evaluateBoard(game, aiPlayer);
        }

        int bestScore = maximizing ? Integer.MIN_VALUE : Integer.MAX_VALUE;
        long movesLoop = legalMoves;

        while (movesLoop != 0) {
            long move = 1L << Long.numberOfTrailingZeros(movesLoop);
            T copy = game.deepCopy();
            PlayResult result = copy.play(move);
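
            // A win found with `depth` plies remaining scores +depth (or -depth
            // for the opponent), so the search prefers faster wins and slower losses.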
            int score;
            switch (result.state()) {
                case WIN -> score = (result.player() == aiPlayer ? depth : -depth);
                case DRAW -> score = 0;
                default -> score = getMoveScore(copy, depth - 1, !maximizing, aiPlayer, alpha, beta);
            }

            if (maximizing) {
                bestScore = Math.max(bestScore, score);
                alpha = Math.max(alpha, bestScore);
            } else {
                bestScore = Math.min(bestScore, score);
                beta = Math.min(beta, bestScore);
            }

            // Alpha-beta pruning
            if (beta <= alpha) break;

            movesLoop &= movesLoop - 1;
        }

        return bestScore;
    }

    /**
     * Simple heuristic evaluation for Reversi-like games.
     * Positive = good for AI, Negative = good for opponent.
     *
     * @param game     Game state
     * @param aiPlayer AI's player index
     * @return heuristic score
     */
    private int evaluateBoard(T game, int aiPlayer) {
        long[] board = game.getBoard();
        int aiCount = 0;
        int opponentCount = 0;

        // Count pieces for AI vs opponent
        for (int i = 0; i < board.length; i++) {
            long bits = board[i];
            for (int j = 0; j < 64; j++) {
                if ((bits & (1L << j)) != 0) {
                    // Assume player 0 occupies even indices, player 1 occupies odd
                    if ((i * 64 + j) % game.getPlayerCount() == aiPlayer) aiCount++;
                    else opponentCount++;
                }
            }
        }

        // Mobility (number of legal moves)
        int mobility = Long.bitCount(game.getLegalMoves());

        // Corner control (top-left, top-right, bottom-left, bottom-right)
        int corners = 0;
        long[] cornerMasks = {1L << 0, 1L << 7, 1L << 56, 1L << 63};
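        // Bits 0, 7, 56 and 63 are the corner squares, assuming the row-major
        // 8x8 bit layout used by the bitboards.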
        for (long mask : cornerMasks) {
            for (long b : board) {
                if ((b & mask) != 0) corners += 1;
            }
        }

        // Weighted sum: raw piece difference, mobility weighted x2, corner
        // control weighted x5 (corner discs can never be flipped back)
        return (aiCount - opponentCount) + 2 * mobility + 5 * corners;
    }
}
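
The move loops above use two standard bitboard idioms: `1L << Long.numberOfTrailingZeros(x)` isolates the lowest set bit, and `x &= x - 1` (Kernighan's trick) clears it. A tiny self-contained demo (hypothetical class name, not project code):

```java
public class BitLoopDemo {
    public static void main(String[] args) {
        long legalMoves = (1L << 2) | (1L << 4) | (1L << 7); // bits 2, 4, 7 set
        long loop = legalMoves;
        while (loop != 0) {
            long move = 1L << Long.numberOfTrailingZeros(loop); // isolate lowest set bit
            System.out.println("move at bit " + Long.numberOfTrailingZeros(move));
            loop &= loop - 1; // clear that bit
        }
        // Prints bits 2, 4, 7 in ascending order, each exactly once.
    }
}
```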

TicTacToeAIRTest.java (deleted file, 121 lines)
@@ -1,121 +0,0 @@
package org.toop.game.tictactoe;

import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.Test;
import org.toop.framework.gameFramework.model.player.Player;
import org.toop.game.games.tictactoe.TicTacToeAIR;
import org.toop.game.games.tictactoe.TicTacToeR;

import static org.junit.jupiter.api.Assertions.*;

final class TicTacToeAIRTest {

    private final TicTacToeAIR ai = new TicTacToeAIR();

    // Helper: play multiple moves in sequence on a fresh board
    private TicTacToeR playSequence(int... moves) {
        TicTacToeR game = new TicTacToeR(new Player[2]);
        for (int move : moves) {
            game.play(move);
        }
        return game;
    }

    @Test
    @DisplayName("AI first move must choose a corner")
    void testFirstMoveIsCorner() {
        TicTacToeR game = new TicTacToeR(new Player[2]);
        int move = ai.getMove(game);

        assertTrue(
                move == 0 || move == 2 || move == 6 || move == 8,
                "AI should pick a corner as first move"
        );
    }

    @Test
    @DisplayName("AI doesn't make a losing move in a specific situation")
    void testWinningMove() {
        TicTacToeR game = playSequence(0, 4, 5, 3, 6, 1, 7);
        int move = ai.getMove(game);

        assertEquals(8, move);
    }

    @Test
    @DisplayName("AI takes immediate winning move")
    void testAiTakesWinningMove() {
        // X = AI, O = opponent
        // Board state (X to play):
        // X | X | .
        // O | O | .
        // . | . | .
        //
        // AI must play 2 (top-right) to win.
        TicTacToeR game = playSequence(
                0, 3, // X, O
                1, 4  // X, O
        );

        int move = ai.getMove(game);
        assertEquals(2, move, "AI must take the winning move at index 2");
    }

    @Test
    @DisplayName("AI blocks opponent's winning move")
    void testAiBlocksOpponent() {
        // Opponent threatens to win:
        // X | . | .
        // O | O | .
        // . | . | X
        // O is about to win at index 5; AI must block it.

        TicTacToeR game = playSequence(
                0, 3, // X, O
                8, 4  // X, O (O threatens at 5)
        );

        int move = ai.getMove(game);
        assertEquals(5, move, "AI must block opponent at index 5");
    }

    @Test
    @DisplayName("AI returns -1 when no legal moves exist")
    void testNoMovesAvailable() {
        TicTacToeR full = new TicTacToeR(new Player[2]);
        // Fill board alternating
        for (int i = 0; i < 9; i++) full.play(i);

        int move = ai.getMove(full);
        assertEquals(-1, move, "AI should return -1 when board is full");
    }

    @Test
    @DisplayName("Minimax depth does not cause crashes and produces valid move")
    void testDepthStability() {
        TicTacToeR game = playSequence(0, 4); // Simple mid-game state
        int move = ai.getMove(game);

        assertTrue(move >= -1 && move <= 8, "AI must return a valid move index");
    }

    @Test
    @DisplayName("AI chooses the optimal forced draw move")
    void testForcedDrawScenario() {
        // Scenario where only one move avoids immediate loss:
        //
        // X | O | X
        // X | O | .
        // O | X | .
        //
        // Legal moves: 5, 8
        // Only move 5 avoids losing.
        TicTacToeR game = new TicTacToeR(new Player[2]);
        int[] moves = {0, 1, 2, 4, 3, 6, 7}; // Hard-coded board setup
        for (int m : moves) game.play(m);

        int move = ai.getMove(game);
        assertEquals(5, move, "AI must choose the only move that avoids losing");
    }
}