NetDB: Fix usage of dbResponseTime stat

Actually update the stat for stores in dbStoreSent();
we are generally storing to different floodfills than we are looking up from,
so we need the stat for stores as well, since we use it as the per-peer timeout in StoreJob.

Change from 1-day to 1-hour stat.
Switch to avgOrLifetimeAvg() so the rate is always valid.
Reduce the max time used for the timeout.
This allows more peers to be tried before the total timeout expires;
previously, the per-peer timeout was almost always the max.
Make sendStore() package private.
Javadocs and cleanups.
Author: zzz
Date:   2021-12-28 08:56:47 -05:00
Parent: d0e72aca66
Commit: bef729463d
3 changed files with 35 additions and 13 deletions
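
To make the timeout change concrete, here is the arithmetic (a minimal sketch: the caps and multiplier come from the diff below, while the 60-second total store budget is a hypothetical figure for illustration):

    public class StoreTimeoutMath {
        static final int OLD_CAP = 7 * 1000;        // old MAX_PER_PEER_TIMEOUT
        static final int NEW_CAP = 5100;            // new MAX_PER_PEER_TIMEOUT
        static final int TIMEOUT_MULTIPLIER = 3;
        static final int TOTAL_TIMEOUT = 60 * 1000; // hypothetical StoreJob budget, ms

        public static void main(String[] args) {
            // The per-peer timeout was almost always at the cap, so the cap
            // determines how many peers fit inside the total budget.
            System.out.println(TOTAL_TIMEOUT / (TIMEOUT_MULTIPLIER * OLD_CAP)); // 2 peers
            System.out.println(TOTAL_TIMEOUT / (TIMEOUT_MULTIPLIER * NEW_CAP)); // 3 peers
        }
    }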

FloodfillNetworkDatabaseFacade.java

@@ -155,8 +155,18 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade {
         sendStore(localRouterInfo.getIdentity().calculateHash(), localRouterInfo, null, null, PUBLISH_TIMEOUT, null);
     }
 
+    /**
+     * Send out a store.
+     *
+     * @param key the DatabaseEntry hash
+     * @param onSuccess may be null, always called if we are ff and ds is an RI
+     * @param onFailure may be null, ignored if we are ff and ds is an RI
+     * @param sendTimeout ignored if we are ff and ds is an RI
+     * @param toIgnore may be null, if non-null, all attempted and skipped targets will be added as of 0.9.53,
+     *                 unused if we are ff and ds is an RI
+     */
     @Override
-    public void sendStore(Hash key, DatabaseEntry ds, Job onSuccess, Job onFailure, long sendTimeout, Set<Hash> toIgnore) {
+    void sendStore(Hash key, DatabaseEntry ds, Job onSuccess, Job onFailure, long sendTimeout, Set<Hash> toIgnore) {
         // if we are a part of the floodfill netDb, don't send out our own leaseSets as part
         // of the flooding - instead, send them to a random floodfill peer so *they* can flood 'em out.
         // perhaps statistically adjust this so we are the source every 1/N times... or something.
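
For context, a hypothetical in-package caller would use the method like this (a sketch only: facade, entry, and the job variables are placeholders declared elsewhere, and per the javadoc the callbacks and timeout are ignored when we are floodfill and the entry is a RouterInfo):

    // Hypothetical caller; sendStore() is now package private, so this
    // must live in the same package as the facade.
    Set<Hash> tried = new HashSet<Hash>();
    facade.sendStore(entry.getHash(), entry,
                     successJob,  // may be null
                     failureJob,  // may be null
                     60*1000,     // sendTimeout, ms
                     tried);      // receives all attempted and skipped targets (0.9.53+)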

KademliaNetworkDatabaseFacade.java

@@ -790,7 +790,7 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
             return;
         }
-        RepublishLeaseSetJob j = null;
+        RepublishLeaseSetJob j;
         synchronized (_publishingLeaseSets) {
             j = _publishingLeaseSets.get(h);
             if (j == null) {
@@ -1467,7 +1467,7 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
      * to be greater than MAX_PER_PEER_TIMEOUT * TIMEOUT_MULTIPLIER by a factor of at least
      * 3 or 4, to allow at least that many peers to be attempted for a store.
      */
-    private static final int MAX_PER_PEER_TIMEOUT = 7*1000;
+    private static final int MAX_PER_PEER_TIMEOUT = 5100;
     private static final int TIMEOUT_MULTIPLIER = 3;
 
     /** todo: does this need more tuning? */
@@ -1475,7 +1475,7 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
         PeerProfile prof = _context.profileOrganizer().getProfile(peer);
         double responseTime = MAX_PER_PEER_TIMEOUT;
         if (prof != null && prof.getIsExpandedDB()) {
-            responseTime = prof.getDbResponseTime().getRate(24*60*60*1000l).getAverageValue();
+            responseTime = prof.getDbResponseTime().getRate(60*60*1000L).getAvgOrLifetimeAvg();
             // if 0 then there is no data, set to max.
             if (responseTime <= 0 || responseTime > MAX_PER_PEER_TIMEOUT)
                 responseTime = MAX_PER_PEER_TIMEOUT;
@@ -1485,8 +1485,17 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
         return TIMEOUT_MULTIPLIER * (int)responseTime; // give it up to 3x the average response time
     }
 
-    /** unused (overridden in FNDF) */
-    public abstract void sendStore(Hash key, DatabaseEntry ds, Job onSuccess, Job onFailure, long sendTimeout, Set<Hash> toIgnore);
+    /**
+     * See implementation in FNDF
+     *
+     * @param key the DatabaseEntry hash
+     * @param onSuccess may be null, always called if we are ff and ds is an RI
+     * @param onFailure may be null, ignored if we are ff and ds is an RI
+     * @param sendTimeout ignored if we are ff and ds is an RI
+     * @param toIgnore may be null, if non-null, all attempted and skipped targets will be added as of 0.9.53,
+     *                 unused if we are ff and ds is an RI
+     */
+    abstract void sendStore(Hash key, DatabaseEntry ds, Job onSuccess, Job onFailure, long sendTimeout, Set<Hash> toIgnore);
 
     /**
      * Increment in the negative lookup cache

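Pulled out of the hunk above, the per-peer timeout computation reduces to a clamp and a multiply. A self-contained sketch using the constants from the diff (the method name here is invented for illustration):

    public class PeerTimeoutSketch {
        static final int MAX_PER_PEER_TIMEOUT = 5100;
        static final int TIMEOUT_MULTIPLIER = 3;

        // avgResponseMs <= 0 means the stat has no data yet.
        static int perPeerTimeout(double avgResponseMs) {
            double responseTime = avgResponseMs;
            if (responseTime <= 0 || responseTime > MAX_PER_PEER_TIMEOUT)
                responseTime = MAX_PER_PEER_TIMEOUT;
            return TIMEOUT_MULTIPLIER * (int) responseTime; // up to 3x the average
        }

        public static void main(String[] args) {
            System.out.println(perPeerTimeout(0));    // no data:   15300 ms
            System.out.println(perPeerTimeout(800));  // fast peer:  2400 ms
            System.out.println(perPeerTimeout(9000)); // clamped:   15300 ms
        }
    }
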
ProfileManagerImpl.java

@@ -113,7 +113,10 @@ public class ProfileManagerImpl implements ProfileManager {
      * was successfully tested with the given round trip latency
      *
      * Non-blocking. Will not update the profile if we can't get the lock.
      *
+     * @deprecated disabled
      */
+    @Deprecated
+    @SuppressWarnings("deprecation")
     public void tunnelTestSucceeded(Hash peer, long responseTimeMs) {
         if (PeerProfile.ENABLE_TUNNEL_TEST_RESPONSE_TIME) {
@@ -260,10 +263,11 @@ public class ProfileManagerImpl implements ProfileManager {
      * Note that we've confirmed a successful send of db data to the peer (though we haven't
      * necessarily requested it again from them, so they /might/ be lying)
      *
-     * This is not really interesting, since they could be lying, so we do not
-     * increment any DB stats at all. On verify, call dbStoreSuccessful().
+     * As of 0.9.53 we update the DbResponseTime.
      *
-     * @param responseTimeMs ignored
+     * This will force creation of DB stats
+     *
+     * @param responseTimeMs duration
      */
     public void dbStoreSent(Hash peer, long responseTimeMs) {
         PeerProfile data = getProfile(peer);
@@ -271,10 +275,9 @@ public class ProfileManagerImpl implements ProfileManager {
         long now = _context.clock().now();
         data.setLastHeardFrom(now);
         data.setLastSendSuccessful(now);
-        //if (!data.getIsExpandedDB())
-        //    data.expandDBProfile();
-        //DBHistory hist = data.getDBHistory();
-        //hist.storeSuccessful();
+        if (!data.getIsExpandedDB())
+            data.expandDBProfile();
+        data.getDbResponseTime().addData(responseTimeMs, responseTimeMs);
     }
/**
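
The switch to avgOrLifetimeAvg() matters because a 1-hour rate can have no events in the current period, which would read as 0 and be mistaken for "no data". A standalone sketch of the fallback idea (the concept only, not i2p's actual Rate implementation):

    class SimpleRate {
        private long periodEvents, periodTotal;     // current measurement period
        private long lifetimeEvents, lifetimeTotal; // since creation

        void addData(long value) {
            periodEvents++;   periodTotal   += value;
            lifetimeEvents++; lifetimeTotal += value;
        }

        // Current-period average when there is data, lifetime average otherwise,
        // so an empty period never reads as a bogus 0.
        double getAvgOrLifetimeAvg() {
            if (periodEvents > 0)
                return periodTotal / (double) periodEvents;
            if (lifetimeEvents > 0)
                return lifetimeTotal / (double) lifetimeEvents;
            return 0;
        }
    }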