NetDB: Expiration tweaks

Start expire job sooner if forced floodfill or downtime was short
Don't run refresh routers job if forced floodfill, or downtime was short, or the comm system is a dummy (VM comm system)
Increase maximum expire probability (cap raised from 50/100 to 80/128, i.e. ~62%)
Don't expire routers close to us just before midnight
Don't start expire leases job until 11 minutes after startup
Base probability out of 128 (a power of two) so nextInt() consumes fewer random bits
Consolidate now() calls
This commit is contained in:
zzz
2023-02-11 06:02:57 -05:00
parent 5d3dffbaa0
commit d6c6b5e092
3 changed files with 33 additions and 14 deletions

View File

@@ -73,13 +73,16 @@ class ExpireRoutersJob extends JobImpl {
RouterKeyGenerator gen = getContext().routerKeyGenerator();
long now = getContext().clock().now();
long cutoff = now - 30*60*1000;
boolean almostMidnight = gen.getTimeTillMidnight() < FloodfillNetworkDatabaseFacade.NEXT_RKEY_RI_ADVANCE_TIME - 30*60*1000;
Hash us = getContext().routerHash();
boolean isFF = _facade.floodfillEnabled();
byte[] ourRKey = isFF ? us.getData() : null;
int pdrop = Math.max(10, Math.min(50, (100 * count / LIMIT_ROUTERS) - 100));
// chance in 128
int pdrop = Math.max(10, Math.min(80, (128 * count / LIMIT_ROUTERS) - 128));
int removed = 0;
if (_log.shouldLog(Log.INFO))
_log.info("Expiring routers, count = " + count + " drop probability " + (count > LIMIT_ROUTERS ? pdrop : 0) + '%');
_log.info("Expiring routers, count = " + count + " drop probability " +
(count > LIMIT_ROUTERS ? pdrop * 100 / 128 : 0) + '%');
for (Map.Entry<Hash, DatabaseEntry> entry : entries) {
DatabaseEntry e = entry.getValue();
if (e.getType() != DatabaseEntry.KEY_TYPE_ROUTERINFO) {
@@ -103,9 +106,16 @@ class ExpireRoutersJob extends JobImpl {
// they have to be within 1/256 of the keyspace
if (distance < 256)
continue;
// TODO maybe: long until = gen.getTimeTillMidnight();
if (almostMidnight) {
// almost midnight, recheck with tomorrow's keys
rkey = gen.getNextRoutingKey(key).getData();
distance = (((rkey[0] ^ ourRKey[0]) & 0xff) << 8) |
((rkey[1] ^ ourRKey[1]) & 0xff);
if (distance < 256)
continue;
}
}
if (getContext().random().nextInt(100) < pdrop) {
if (getContext().random().nextInt(128) < pdrop) {
_facade.dropAfterLookupFailed(key);
removed++;
}

View File

@@ -51,7 +51,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
private static final int FLOOD_PRIORITY = OutNetMessage.PRIORITY_NETDB_FLOOD;
private static final int FLOOD_TIMEOUT = 30*1000;
private static final long NEXT_RKEY_RI_ADVANCE_TIME = 45*60*1000;
static final long NEXT_RKEY_RI_ADVANCE_TIME = 45*60*1000;
private static final long NEXT_RKEY_LS_ADVANCE_TIME = 10*60*1000;
private static final int NEXT_FLOOD_QTY = 2;
@@ -85,10 +85,15 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
_context.jobQueue().addJob(_ffMonitor);
_lookupThrottler = new LookupThrottler();
// refresh old routers
Job rrj = new RefreshRoutersJob(_context, this);
rrj.getTiming().setStartAfter(_context.clock().now() + 5*60*1000);
_context.jobQueue().addJob(rrj);
boolean isFF = _context.getBooleanProperty(FloodfillMonitorJob.PROP_FLOODFILL_PARTICIPANT);
long down = _context.router().getEstimatedDowntime();
if (!_context.commSystem().isDummy() &&
(down == 0 || (!isFF && down > 30*60*1000) || (isFF && down > 24*60*60*1000))) {
// refresh old routers
Job rrj = new RefreshRoutersJob(_context, this);
rrj.getTiming().setStartAfter(_context.clock().now() + 5*60*1000);
_context.jobQueue().addJob(rrj);
}
}
@Override

View File

@@ -294,8 +294,6 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacad
} catch (IOException ioe) {
throw new RuntimeException("Unable to initialize netdb storage", ioe);
}
//_ds = new TransientDataStore();
// _exploreKeys = new HashSet(64);
_dbDir = dbDir;
_negativeCache = new NegativeLookupCache(_context);
_blindCache.startup();
@@ -307,14 +305,20 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacad
// expire old leases
Job elj = new ExpireLeasesJob(_context, this);
elj.getTiming().setStartAfter(_context.clock().now() + 2*60*1000);
long now = _context.clock().now();
elj.getTiming().setStartAfter(now + 11*60*1000);
_context.jobQueue().addJob(elj);
//// expire some routers
// Don't run until after RefreshRoutersJob has run, and after validate() will return invalid for old routers.
if (!_context.commSystem().isDummy()) {
Job erj = new ExpireRoutersJob(_context, this);
erj.getTiming().setStartAfter(_context.clock().now() + ROUTER_INFO_EXPIRATION_FLOODFILL + 10*60*1000);
boolean isFF = _context.getBooleanProperty(FloodfillMonitorJob.PROP_FLOODFILL_PARTICIPANT);
long down = _context.router().getEstimatedDowntime();
long delay = (down == 0 || (!isFF && down > 30*60*1000) || (isFF && down > 24*60*60*1000)) ?
ROUTER_INFO_EXPIRATION_FLOODFILL + 10*60*1000 :
10*60*1000;
erj.getTiming().setStartAfter(now + delay);
_context.jobQueue().addJob(erj);
}
@@ -333,7 +337,7 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacad
// We're trying to minimize the ff connections to lessen the load on the
// floodfills, and in any case let's try to build some real expl. tunnels first.
// No rush, it only runs every 30m.
_exploreJob.getTiming().setStartAfter(_context.clock().now() + EXPLORE_JOB_DELAY);
_exploreJob.getTiming().setStartAfter(now + EXPLORE_JOB_DELAY);
_context.jobQueue().addJob(_exploreJob);
} else {
_log.warn("Operating in quiet mode - not exploring or pushing data proactively, simply reactively");