Skip to content

Commit

Permalink
Introduce Fractional LMR
Browse files Browse the repository at this point in the history
  • Loading branch information
xu-shawn authored and PikaCat-OuO committed Nov 14, 2024
1 parent 2cdfdcf commit 85a83ba
Show file tree
Hide file tree
Showing 2 changed files with 17 additions and 16 deletions.
8 changes: 4 additions & 4 deletions src/history.h
Original file line number Diff line number Diff line change
Expand Up @@ -128,11 +128,11 @@ enum StatsType {
// during the current search, and is used for reduction and move ordering decisions.
// It uses 2 tables (one for each color) indexed by the move's from and to squares,
// see https://www.chessprogramming.org/Butterfly_Boards (~11 elo)
using ButterflyHistory = Stats<int16_t, 7183, COLOR_NB, int(SQUARE_NB) * int(SQUARE_NB)>;
using ButterflyHistory = Stats<int16_t, 7183, COLOR_NB, 1 << 14>;

// LowPlyHistory is addressed by ply and move's from and to squares, used
// to improve move ordering near the root
using LowPlyHistory = Stats<int16_t, 7183, LOW_PLY_HISTORY_SIZE, int(SQUARE_NB) * int(SQUARE_NB)>;
using LowPlyHistory = Stats<int16_t, 7183, LOW_PLY_HISTORY_SIZE, 1 << 14>;

// CapturePieceToHistory is addressed by a move's [piece][to][captured piece type]
using CapturePieceToHistory = Stats<int16_t, 10692, PIECE_NB, SQUARE_NB, PIECE_TYPE_NB>;
Expand All @@ -155,8 +155,8 @@ using PawnHistory = Stats<int16_t, 8192, PAWN_HISTORY_SIZE, PIECE_NB, SQUARE_NB>
// see https://www.chessprogramming.org/Static_Evaluation_Correction_History
enum CorrHistType {
Pawn, // By color and pawn structure
Major, // By color and positions of major pieces (Queen, Rook) and King
Minor, // By color and positions of minor pieces (Knight, Bishop) and King
Major, // By color and positions of major pieces (Rook, Knight, Cannon) and King
Minor, // By color and positions of minor pieces (Advisor, Bishop) and King
NonPawn, // By color and non-pawn material positions
PieceTo, // By [piece][to] move
Continuation, // Combined history of move pairs
Expand Down
25 changes: 13 additions & 12 deletions src/search.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -908,7 +908,7 @@ Value Search::Worker::search(
mp.skip_quiet_moves();

// Reduced depth of the next LMR search
int lmrDepth = newDepth - r;
int lmrDepth = newDepth - r / 1024;

if (capture || givesCheck)
{
Expand Down Expand Up @@ -1065,29 +1065,29 @@ Value Search::Worker::search(

// Decrease reduction if position is or has been on the PV (~7 Elo)
if (ss->ttPv)
r -= 1 + (ttData.value > alpha) + (ttData.depth >= depth);
r -= 1024 + (ttData.value > alpha) * 1024 + (ttData.depth >= depth) * 1024;

// Decrease reduction for PvNodes (~0 Elo on STC, ~2 Elo on LTC)
if (PvNode)
r--;
r -= 1024;

// These reduction adjustments have no proven non-linear scaling

// Increase reduction for cut nodes (~4 Elo)
if (cutNode)
r += 2 - (ttData.depth >= depth && ss->ttPv);
r += 2048 - (ttData.depth >= depth && ss->ttPv) * 1024;

// Increase reduction if ttMove is a capture but the current move is not a capture (~3 Elo)
if (ttCapture && !capture)
r += 1 + (depth < 8);
r += 1024 + (depth < 8) * 1024;

// Increase reduction if next ply has a lot of fail high (~5 Elo)
if ((ss + 1)->cutoffCnt > 3)
r += 1 + allNode;
r += 1024 + allNode * 1024;

// For first picked move (ttMove) reduce reduction (~3 Elo)
else if (move == ttData.move)
r -= 2;
r -= 2048;

if (capture)
ss->statScore =
Expand All @@ -1099,7 +1099,7 @@ Value Search::Worker::search(
+ (*contHist[1])[movedPiece][move.to_sq()] - 4241;

// Decrease/increase reduction for moves with a good/bad history (~8 Elo)
r -= ss->statScore / 7600;
r -= ss->statScore * 1024 / 7600;

// Step 16. Late moves reduction / extension (LMR, ~117 Elo)
if (depth >= 2 && moveCount > 1)
Expand All @@ -1109,7 +1109,7 @@ Value Search::Worker::search(
// beyond the first move depth.
// To prevent problems when the max value is less than the min value,
// std::clamp has been replaced by a more robust implementation.
Depth d = std::max(1, std::min(newDepth - r, newDepth + !allNode));
Depth d = std::max(1, std::min(newDepth - r / 1024, newDepth + !allNode));

value = -search<NonPV>(pos, ss + 1, -(alpha + 1), -alpha, d, true);

Expand Down Expand Up @@ -1137,10 +1137,11 @@ Value Search::Worker::search(
{
// Increase reduction if ttMove is not present (~6 Elo)
if (!ttData.move)
r += 2;
r += 2048;

// Note that if expected reduction is high, we reduce search depth by 1 here (~9 Elo)
value = -search<NonPV>(pos, ss + 1, -(alpha + 1), -alpha, newDepth - (r > 3), !cutNode);
value =
-search<NonPV>(pos, ss + 1, -(alpha + 1), -alpha, newDepth - (r > 3072), !cutNode);
}

// For PV nodes only, do a full PV search on the first move or after a fail high,
Expand Down Expand Up @@ -1624,7 +1625,7 @@ Value Search::Worker::qsearch(Position& pos, Stack* ss, Value alpha, Value beta)

// Computes the late-move-reduction (LMR) amount for a move.
// i     : "improving" flag — when false and the scale is large, extra reduction applies
// d     : remaining search depth (indexes the precomputed reductions[] table)
// mn    : move number within the move ordering (indexes reductions[] as well)
// delta : current aspiration-window width; wider windows relative to rootDelta
//         shrink the reduction
// NOTE(review): this is a diff rendering — the first return line is the
// pre-commit version (whole plies, divided by 1182); the second is the
// committed "fractional LMR" version, which keeps the reduction in units of
// 1/1024 ply (callers visible in the hunks above divide r by 1024).
Depth Search::Worker::reduction(bool i, Depth d, int mn, int delta) const {
int reductionScale = reductions[d] * reductions[mn];
return (reductionScale + 2423 - delta * 1198 / rootDelta) / 1182 + (!i && reductionScale > 694);
return (reductionScale + 2423 - delta * 1198 / rootDelta) + (!i && reductionScale > 694) * 1024;
}

// elapsed() returns the time elapsed since the search started. If the
Expand Down

0 comments on commit 85a83ba

Please sign in to comment.