Skip to content

Commit

Permalink
Allow expired entries to be resurrected (fixes #30)
Browse files Browse the repository at this point in the history
An expired entry may be replaced in the cache prior to it being evicted
by a maintenance cycle. When the maintenance is being performed and reads
a stale timestamp, during eviction it must validate that the entry is
still an eviction candidate. This was reported to have caused excessive
evictions for short time durations, so the race was not benign as
originally thought.
  • Loading branch information
ben-manes committed Sep 26, 2015
1 parent 73b4443 commit 0d05306
Show file tree
Hide file tree
Showing 2 changed files with 230 additions and 39 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -380,46 +380,12 @@ void evictEntries() {

Node<K, V> next = node.getNextInAccessOrder();
if (node.getWeight() != 0) {
evictEntry(node, RemovalCause.SIZE);
evictEntry(node, RemovalCause.SIZE, 0L);
}
node = next;
}
}

/**
 * Evicts the entry: atomically removes it from the data map (if still mapped to this node),
 * unlinks it from the eviction deques, and notifies the writer, stats, and removal listener.
 *
 * @param node the entry to evict
 * @param cause the reason to evict
 */
@GuardedBy("evictionLock")
void evictEntry(Node<K, V> node, RemovalCause cause) {
K key = node.getKey();
V value = node.getValue();
// Written from inside the computeIfPresent lambda to report whether the mapping was removed
boolean[] removed = new boolean[1];
// If the key or value reference was garbage collected, report COLLECTED instead of the caller's
// cause
RemovalCause actualCause = (key == null) || (value == null) ? RemovalCause.COLLECTED : cause;

// Remove atomically, but only if the map still holds this exact node (another thread may have
// replaced the mapping)
data.computeIfPresent(node.getKeyReference(), (k, n) -> {
if (n == node) {
writer.delete(key, value, actualCause);
removed[0] = true;
return null;
}
return n;
});

// NOTE(review): when cause is EXPIRED, the entry's timestamp is not revalidated here, so a
// concurrent update that refreshed the entry may still be evicted by a scan that read a stale
// time — confirm this race is acceptable or revalidate before deleting.
makeDead(node);
if (evicts() || expiresAfterAccess()) {
accessOrderDeque().remove(node);
}
if (expiresAfterWrite()) {
writeOrderDeque().remove(node);
}

if (removed[0]) {
statsCounter().recordEviction();
if (hasRemovalListener()) {
// Notify the listener only if the entry was evicted. This must be performed as the last
// step during eviction to safe guard against the executor rejecting the notification task.
notifyRemoval(key, value, actualCause);
}
}
}

@GuardedBy("evictionLock")
void expireEntries() {
long now = expirationTicker().read();
Expand All @@ -430,7 +396,7 @@ void expireEntries() {
if ((node == null) || (node.getAccessTime() > expirationTime)) {
break;
}
evictEntry(node, RemovalCause.EXPIRED);
evictEntry(node, RemovalCause.EXPIRED, now);
}
}
if (expiresAfterWrite()) {
Expand All @@ -440,7 +406,7 @@ void expireEntries() {
if ((node == null) || (node.getWriteTime() > expirationTime)) {
break;
}
evictEntry(node, RemovalCause.EXPIRED);
evictEntry(node, RemovalCause.EXPIRED, now);
}
}
}
Expand All @@ -454,6 +420,69 @@ boolean hasExpired(Node<K, V> node, long now) {
|| (expiresAfterWrite() && (now - node.getWriteTime() >= expiresAfterWriteNanos()));
}

/**
 * Attempts to evict the entry based on the given removal cause. A removal due to expiration may
 * be ignored if the entry was since updated and is no longer eligible for eviction.
 *
 * @param node the entry to evict
 * @param cause the reason to evict
 * @param now the current time, used only if expiring
 */
@GuardedBy("evictionLock")
void evictEntry(Node<K, V> node, RemovalCause cause, long now) {
K key = node.getKey();
V value = node.getValue();
// Flags written from inside the computeIfPresent lambda to communicate its outcome
boolean[] removed = new boolean[1];
boolean[] resurrect = new boolean[1];
// A collected key or value reference overrides the caller's cause
RemovalCause actualCause = (key == null) || (value == null) ? RemovalCause.COLLECTED : cause;

data.computeIfPresent(node.getKeyReference(), (k, n) -> {
if (n != node) {
// The mapping was replaced by another thread; leave the current entry untouched
return n;
}
if (actualCause == RemovalCause.EXPIRED) {
// Revalidate under the map's per-key computation: the eviction scan may have read a stale
// timestamp, and the entry may have been updated since and no longer be expired
boolean expired = false;
if (expiresAfterAccess()) {
long expirationTime = now - expiresAfterAccessNanos();
expired |= n.getAccessTime() <= expirationTime;
}
if (expiresAfterWrite()) {
long expirationTime = now - expiresAfterWriteNanos();
expired |= n.getWriteTime() <= expirationTime;
}
if (!expired) {
// The entry was refreshed; keep the mapping and cancel this eviction
resurrect[0] = true;
return n;
}
}
writer.delete(key, value, actualCause);
removed[0] = true;
return null;
});

if (resurrect[0]) {
// The entry is no longer expired and will be reordered in the next maintenance cycle
return;
}

// Unlink from the eviction policy's data structures regardless of whether the mapping was
// removed by this call (it may already be absent)
makeDead(node);
if (evicts() || expiresAfterAccess()) {
accessOrderDeque().remove(node);
}
if (expiresAfterWrite()) {
writeOrderDeque().remove(node);
}

if (removed[0]) {
statsCounter().recordEviction();
if (hasRemovalListener()) {
// Notify the listener only if the entry was evicted. This must be performed as the last
// step during eviction to safe guard against the executor rejecting the notification task.
notifyRemoval(key, value, actualCause);
}
}
}

/**
* Performs the post-processing work required after a read.
*
Expand Down Expand Up @@ -577,7 +606,7 @@ void drainKeyReferences() {
while ((keyRef = keyReferenceQueue().poll()) != null) {
Node<K, V> node = data.get(keyRef);
if (node != null) {
evictEntry(node, RemovalCause.COLLECTED);
evictEntry(node, RemovalCause.COLLECTED, 0L);
}
}
}
Expand All @@ -594,7 +623,7 @@ void drainValueReferences() {
InternalReference<V> ref = (InternalReference<V>) valueRef;
Node<K, V> node = data.get(ref.getKeyReference());
if ((node != null) && (valueRef == node.getValueReference())) {
evictEntry(node, RemovalCause.COLLECTED);
evictEntry(node, RemovalCause.COLLECTED, 0L);
}
}
}
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,162 @@
/*
* Copyright 2015 Ben Manes. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.benmanes.caffeine.cache.issues;

import static com.github.benmanes.caffeine.testing.IsFutureValue.futureOf;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.is;

import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;

import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;

import com.github.benmanes.caffeine.cache.AsyncLoadingCache;
import com.github.benmanes.caffeine.cache.CacheLoader;
import com.github.benmanes.caffeine.cache.Caffeine;

/**
 * Issue #30: Unexpected cache misses with {@code expireAfterWrite} using multiple keys.
 * <p>
 * Prior to eviction, the cache must revalidate that the entry has expired. If the entry was updated
 * but the maintenance thread reads a stale value, then the entry may be prematurely expired. The
 * removal must detect that the entry was "resurrected" and cancel the expiration.
 *
 * @author yurgis2
 * @author [email protected] (Ben Manes)
 */
public final class Issue30Test {
  private static final boolean DEBUG = false;

  private static final String A_KEY = "foo";
  private static final String A_ORIGINAL = "foo0";
  private static final String A_UPDATE_1 = "foo1";
  private static final String A_UPDATE_2 = "foo2";

  private static final String B_KEY = "bar";
  private static final String B_ORIGINAL = "bar0";
  private static final String B_UPDATE_1 = "bar1";
  private static final String B_UPDATE_2 = "bar2";

  /** Time-to-live of an entry, in milliseconds. */
  private static final int TTL = 100;
  /** Slack, in milliseconds, used to sleep to either side of the expiration boundary. */
  private static final int EPSILON = 10;

  /** Provides a fresh async cache, its backing source map, and a per-key load-time journal. */
  @DataProvider(name = "cache")
  public Object[][] providesCache() {
    ConcurrentMap<String, String> source = new ConcurrentHashMap<>();
    ConcurrentMap<String, Date> lastLoad = new ConcurrentHashMap<>();
    AsyncLoadingCache<String, String> cache = Caffeine.newBuilder()
        .expireAfterWrite(TTL, TimeUnit.MILLISECONDS)
        .buildAsync(new Loader(source, lastLoad));
    return new Object[][] {{ cache, source, lastLoad }};
  }

  // FIXME: For now, this test has to be run manually due to CI & build having enough shared work
  // to cause the delays to be exceeded. This test needs to be isolated so that no other machine
  // work causes the timing periods to exceed the thresholds.
  @Test(enabled = false, dataProvider = "cache", invocationCount = 100, threadPoolSize = 10)
  public void expiration(AsyncLoadingCache<String, String> cache,
      ConcurrentMap<String, String> source, ConcurrentMap<String, Date> lastLoad) throws Exception {
    initialValues(cache, source, lastLoad);
    firstUpdate(cache, source);
    secondUpdate(cache, source);
  }

  /** Seeds the source and asserts that the initial values are loaded and served. */
  private void initialValues(AsyncLoadingCache<String, String> cache,
      ConcurrentMap<String, String> source, ConcurrentMap<String, Date> lastLoad)
      throws InterruptedException, ExecutionException {
    source.put(A_KEY, A_ORIGINAL);
    source.put(B_KEY, B_ORIGINAL);
    lastLoad.clear();

    assertThat("should serve initial value", cache.get(A_KEY), is(futureOf(A_ORIGINAL)));
    assertThat("should serve initial value", cache.get(B_KEY), is(futureOf(B_ORIGINAL)));
  }

  /**
   * Updates the source, then asserts the cached values are served until expiration and the
   * updated values are served afterwards.
   */
  private void firstUpdate(AsyncLoadingCache<String, String> cache,
      ConcurrentMap<String, String> source) throws InterruptedException, ExecutionException {
    source.put(A_KEY, A_UPDATE_1);
    source.put(B_KEY, B_UPDATE_1);

    assertThat("should serve cached initial value", cache.get(A_KEY), is(futureOf(A_ORIGINAL)));
    assertThat("should serve cached initial value", cache.get(B_KEY), is(futureOf(B_ORIGINAL)));

    Thread.sleep(EPSILON); // sleep for less than expiration
    assertThat("still serve cached initial value", cache.get(A_KEY), is(futureOf(A_ORIGINAL)));
    assertThat("still serve cached initial value", cache.get(B_KEY), is(futureOf(B_ORIGINAL)));

    Thread.sleep(TTL + EPSILON); // sleep until expiration
    assertThat("now serve first updated value", cache.get(A_KEY), is(futureOf(A_UPDATE_1)));
    assertThat("now serve first updated value", cache.get(B_KEY), is(futureOf(B_UPDATE_1)));
  }

  /**
   * Updates the source again and asserts that, before expiration, both keys still serve the
   * first update (i.e. no premature eviction).
   */
  private void secondUpdate(AsyncLoadingCache<String, String> cache,
      ConcurrentMap<String, String> source) throws Exception {
    source.put(A_KEY, A_UPDATE_2);
    source.put(B_KEY, B_UPDATE_2);

    assertThat("serve cached first updated value", cache.get(A_KEY), is(futureOf(A_UPDATE_1)));
    assertThat("serve cached first updated value", cache.get(B_KEY), is(futureOf(B_UPDATE_1)));

    Thread.sleep(EPSILON); // sleep for less than expiration
    assertThat("serve cached first updated value", cache.get(A_KEY), is(futureOf(A_UPDATE_1)));
    // Fixed: this assertion previously duplicated the A_KEY check, leaving B_KEY unverified
    assertThat("serve cached first updated value", cache.get(B_KEY), is(futureOf(B_UPDATE_1)));
  }

  /** Loads values from the source map, recording the time of each load for diagnostics. */
  static final class Loader implements CacheLoader<String, String> {
    final ConcurrentMap<String, String> source;
    final ConcurrentMap<String, Date> lastLoad;

    Loader(ConcurrentMap<String, String> source, ConcurrentMap<String, Date> lastLoad) {
      this.source = source;
      this.lastLoad = lastLoad;
    }

    @Override
    public String load(String key) {
      // The async variant below must be used instead
      throw new IllegalStateException();
    }

    @Override
    public CompletableFuture<String> asyncLoad(String key, Executor executor) {
      reportCacheMiss(key);
      return CompletableFuture.completedFuture(source.get(key));
    }

    /** Records the load time for the key and, when debugging, prints the inter-load duration. */
    private void reportCacheMiss(String key) {
      Date now = new Date();
      Date last = lastLoad.get(key);
      lastLoad.put(key, now);

      if (DEBUG) {
        // Fixed: the pattern was "hh:MM:ss.SSS" — "MM" is month-of-year, so the debug timestamp
        // printed the month in the minutes position; "HH:mm" is 24-hour hours and minutes
        String time = new SimpleDateFormat("HH:mm:ss.SSS").format(now);
        if (last == null) {
          System.out.println(key + ": first load @ " + time);
        } else {
          long duration = (now.getTime() - last.getTime());
          System.out.println(key + ": " + duration + "ms after last load @ " + time);
        }
      }
    }
  }
}

0 comments on commit 0d05306

Please sign in to comment.