forked from I2P_Developers/i2p.i2p
* ProfileOrganizer:
  - Fix expiration of old profiles
  - Don't exceed high cap limit between reorgs
  - Reduce max reorg cycle time
  - Reduce new bonus values
@@ -1,3 +1,12 @@
+2011-10-31 zzz
+ * NetDB: Reduce max RI publish interval
+ * ProfileOrganizer:
+   - Fix expiration of old profiles
+   - Don't exceed high cap limit between reorgs
+   - Reduce max reorg cycle time
+   - Reduce new bonus values
+ * SSU: Increase threshold for incremented cost
+
 2011-10-29 zzz
  * BuildHandler: Add router.buildHandlerThreads config setting
  * CapacityCalculator: Small adjustment for XOR distance to
@@ -18,7 +18,7 @@ public class RouterVersion {
     /** deprecated */
     public final static String ID = "Monotone";
     public final static String VERSION = CoreVersion.VERSION;
-    public final static long BUILD = 3;
+    public final static long BUILD = 4;

     /** for example "-test" */
     public final static String EXTRA = "";
@@ -18,9 +18,9 @@ class CapacityCalculator {

     // total of all possible bonuses should be less than 4, since
     // crappy peers start at 1 and the base is 5.
-    private static final double BONUS_NEW = 1.25;
-    private static final double BONUS_ESTABLISHED = 1;
-    private static final double BONUS_SAME_COUNTRY = .85;
+    private static final double BONUS_NEW = 0.85;
+    private static final double BONUS_ESTABLISHED = 0.65;
+    private static final double BONUS_SAME_COUNTRY = 0.45;
     private static final double BONUS_XOR = .25;
     private static final double PENALTY_UNREACHABLE = 2;

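Aside on the arithmetic: the comment above is the invariant. The base capacity is 5, poor performers decay toward 1, and the stacked bonuses must stay under 4 so that bonuses alone can never lift a bad peer past an average one. A self-contained sketch of how the revised values combine; the adjust() helper and the base figure are illustrative, since the real CapacityCalculator derives the base from tunnel accept/reject history:

    // Illustrative only -- shows the new bonuses stacking on an assumed base of 5.
    class BonusSketch {
        static final double BONUS_NEW = 0.85;
        static final double BONUS_ESTABLISHED = 0.65;
        static final double BONUS_SAME_COUNTRY = 0.45;
        static final double BONUS_XOR = .25;
        static final double PENALTY_UNREACHABLE = 2;

        static double adjust(double base, boolean isNew, boolean established,
                             boolean sameCountry, boolean closeXor, boolean unreachable) {
            double cap = base;
            if (isNew)        cap += BONUS_NEW;
            if (established)  cap += BONUS_ESTABLISHED;
            if (sameCountry)  cap += BONUS_SAME_COUNTRY;
            if (closeXor)     cap += BONUS_XOR;
            if (unreachable)  cap -= PENALTY_UNREACHABLE;
            return cap;
        }

        public static void main(String[] args) {
            // Even if every bonus stacked, 0.85 + 0.65 + 0.45 + 0.25 = 2.2 points,
            // down from 3.35 before this commit and safely under the 4-point ceiling.
            System.out.println(adjust(5, false, true, true, true, false)); // 6.35
        }
    }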
@@ -8,7 +8,7 @@ import net.i2p.data.DataHelper;
  * Order profiles by their capacity, but backwards (highest capacity / value first).
  *
  */
-class InverseCapacityComparator implements Comparator {
+class InverseCapacityComparator implements Comparator<PeerProfile> {
     /**
      * Compare the two objects backwards. The standard comparator returns
      * -1 if lhs is less than rhs, 1 if lhs is greater than rhs, or 0 if they're
@@ -18,11 +18,7 @@ class InverseCapacityComparator implements Comparator {
      * @return -1 if the right hand side is smaller, 1 if the left hand side is
      * smaller, or 0 if they are the same peer (Comparator.compare() inverted)
      */
-    public int compare(Object lhs, Object rhs) {
-        if ( (lhs == null) || (rhs == null) || (!(lhs instanceof PeerProfile)) || (!(rhs instanceof PeerProfile)) )
-            throw new ClassCastException("Only profiles can be compared - lhs = " + lhs + " rhs = " + rhs);
-        PeerProfile left = (PeerProfile)lhs;
-        PeerProfile right = (PeerProfile)rhs;
+    public int compare(PeerProfile left, PeerProfile right) {

         double rval = right.getCapacityValue();
         double lval = left.getCapacityValue();
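Generifying to Comparator&lt;PeerProfile&gt; moves the old runtime instanceof checks to compile time; dropping the null handling is presumably safe because the sorted set this feeds never holds nulls. Since _strictCapacityOrder is built on this comparator, plain iteration already walks peers highest-capacity-first. A self-contained demo of the inverted ordering, where the Profile record is a stand-in for the real PeerProfile (which breaks ties on the peer hash):

    import java.util.Comparator;
    import java.util.TreeSet;

    public class InverseOrderDemo {
        record Profile(String peer, double capacity) {}   // Java 16+

        public static void main(String[] args) {
            // Compare backwards: higher capacity sorts first. Ties fall back to
            // the peer id so distinct peers with equal capacity both stay in the set.
            Comparator<Profile> inverse = (left, right) -> {
                int byCapacity = Double.compare(right.capacity(), left.capacity());
                return byCapacity != 0 ? byCapacity : right.peer().compareTo(left.peer());
            };

            TreeSet<Profile> strictCapacityOrder = new TreeSet<>(inverse);
            strictCapacityOrder.add(new Profile("peerA", 5.9));
            strictCapacityOrder.add(new Profile("peerB", 7.2));
            strictCapacityOrder.add(new Profile("peerC", 6.4));

            // Prints peerB, peerC, peerA -- highest capacity first.
            strictCapacityOrder.forEach(p -> System.out.println(p.peer() + " " + p.capacity()));
        }
    }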
@@ -45,7 +45,13 @@ class PeerManager {
     private final Map<Hash, String> _capabilitiesByPeer;
     private static final long REORGANIZE_TIME = 45*1000;
     private static final long REORGANIZE_TIME_MEDIUM = 123*1000;
-    private static final long REORGANIZE_TIME_LONG = 551*1000;
+    /**
+     *  We don't want this much longer than the average connect time,
+     *  as the CapacityCalculator now includes connection as a factor.
+     *  This must also be less than 10 minutes, which is the shortest
+     *  Rate contained in the profile, as the Rates must be coalesced.
+     */
+    private static final long REORGANIZE_TIME_LONG = 351*1000;

     public static final String TRACKED_CAPS = "" +
         FloodfillNetworkDatabaseFacade.CAPABILITY_FLOODFILL +
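The new javadoc states both constraints: the interval should track average connection lifetime, since CapacityCalculator now factors in whether a connection is established, and it must stay under the 10-minute Rate period so profile stats can still be coalesced; 351s satisfies both with more margin than 551s did. Three tiers of constant imply an escalation schedule, reorganizing a young router aggressively and backing off once profiles settle. The uptime cutoffs below are invented for illustration and are not PeerManager's actual logic:

    // Hypothetical scheduling policy -- only the three constants come from the patch.
    public class ReorgScheduleSketch {
        static final long REORGANIZE_TIME = 45*1000;
        static final long REORGANIZE_TIME_MEDIUM = 123*1000;
        static final long REORGANIZE_TIME_LONG = 351*1000;  // was 551*1000; must stay < 10 min

        // Invented cutoffs: re-tier quickly while young, settle to the long cadence.
        static long nextReorgDelay(long uptimeMs) {
            if (uptimeMs < 30*60*1000L)
                return REORGANIZE_TIME;
            if (uptimeMs < 2*60*60*1000L)
                return REORGANIZE_TIME_MEDIUM;
            return REORGANIZE_TIME_LONG;
        }
    }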
@@ -68,7 +74,7 @@ class PeerManager {
         _persistenceHelper = new ProfilePersistenceHelper(context);
         _organizer = context.profileOrganizer();
         _organizer.setUs(context.routerHash());
-        _capabilitiesByPeer = new ConcurrentHashMap(128);
+        _capabilitiesByPeer = new ConcurrentHashMap(256);
         _peersByCapability = new HashMap(TRACKED_CAPS.length());
         for (int i = 0; i < TRACKED_CAPS.length(); i++)
             _peersByCapability.put(Character.valueOf(Character.toLowerCase(TRACKED_CAPS.charAt(i))), new ConcurrentHashSet());
@@ -106,17 +106,17 @@ public class PeerProfile {
     }

     /** @since 0.8.11 */
-    public boolean isEstablished() {
+    boolean isEstablished() {
         return _context.commSystem().isEstablished(_peer);
     }

     /** @since 0.8.11 */
-    public boolean wasUnreachable() {
+    boolean wasUnreachable() {
         return _context.commSystem().wasUnreachable(_peer);
     }

     /** @since 0.8.11 */
-    public boolean isSameCountry() {
+    boolean isSameCountry() {
         String us = _context.commSystem().getOurCountry();
         return us != null && us.equals(_context.commSystem().getCountry(_peer));
     }
@@ -126,7 +126,7 @@ public class PeerProfile {
      * @return -127 to +128, lower is closer
      * @since 0.8.11
      */
-    public int getXORDistance() {
+    int getXORDistance() {
         return _distance;
     }
@@ -271,7 +271,7 @@ public class PeerProfile {
         else
             _tunnelTestResponseTimeAvg = 0.75*_tunnelTestResponseTimeAvg + .25*(double)ms;

-        if ( (_peer != null) && (_log.shouldLog(Log.INFO)) )
+        if (_log.shouldLog(Log.INFO))
             _log.info("Updating tunnel test time for " + _peer.toBase64().substring(0,6)
                       + " to " + _tunnelTestResponseTimeAvg + " via " + ms);
     }
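The surviving assignment is an exponentially weighted moving average: each new tunnel-test result contributes 25% and accumulated history keeps 75%, and the _peer null guard was dead weight since a profile always has a peer. A quick numeric check of the smoothing; the sample times are invented, and the first-sample seeding is assumed from the elided if-branch:

    public class EwmaDemo {
        public static void main(String[] args) {
            double avg = 0;
            long[] samples = { 800, 400, 400, 400 };     // ms, hypothetical
            for (long ms : samples) {
                if (avg <= 0)
                    avg = ms;                            // assumed: first sample seeds
                else
                    avg = 0.75 * avg + .25 * (double) ms;
                System.out.println(avg);                 // 800.0, 700.0, 625.0, 568.75
            }
        }
    }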
@@ -513,20 +513,19 @@ public class PeerProfile {
     void setIsFailing(boolean val) { _isFailing = val; }

     @Override
-    public int hashCode() { return (_peer == null ? 0 : _peer.hashCode()); }
+    public int hashCode() { return _peer.hashCode(); }

     @Override
     public boolean equals(Object obj) {
         if (obj == null ||
-            (!(obj instanceof PeerProfile)) ||
-            _peer == null)
+            !(obj instanceof PeerProfile))
             return false;
         PeerProfile prof = (PeerProfile)obj;
-        return _peer.equals(prof.getPeer());
+        return _peer.equals(prof._peer);
     }

     @Override
-    public String toString() { return "Profile: " + getPeer().toBase64(); }
+    public String toString() { return "Profile: " + _peer; }

     /**
      * New measurement is 12KB per expanded profile. (2009-03 zzz)
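Dropping the _peer null paths tightens the equals()/hashCode() pair while keeping their contract intact: two profiles are equal exactly when their peer hashes are equal, and equal profiles hash alike. A tiny contract check with a stand-in class, not the real PeerProfile:

    import java.util.Objects;

    public class ContractCheck {
        static final class Prof {
            final String peer;               // non-null by construction, like _peer
            Prof(String peer) { this.peer = Objects.requireNonNull(peer); }
            @Override public int hashCode() { return peer.hashCode(); }
            @Override public boolean equals(Object obj) {
                if (obj == null || !(obj instanceof Prof))
                    return false;
                return peer.equals(((Prof) obj).peer);
            }
        }

        public static void main(String[] args) {
            Prof a = new Prof("abcd1234"), b = new Prof("abcd1234");
            // equals implies equal hashCode -- required for use as HashMap keys.
            System.out.println(a.equals(b) && a.hashCode() == b.hashCode()); // true
        }
    }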
@@ -92,26 +92,26 @@ public class ProfileOrganizer {
     private static final int ABSOLUTE_MAX_HIGHCAP_PEERS = 150;

     /** synchronized against this lock when updating the tier that peers are located in (and when fetching them from a peer) */
-    private final ReentrantReadWriteLock _reorganizeLock = new ReentrantReadWriteLock(true);
+    private final ReentrantReadWriteLock _reorganizeLock = new ReentrantReadWriteLock(false);

     public ProfileOrganizer(RouterContext context) {
         _context = context;
         _log = context.logManager().getLog(ProfileOrganizer.class);
         _comp = new InverseCapacityComparator();
-        _fastPeers = new HashMap(16);
-        _highCapacityPeers = new HashMap(32);
-        _wellIntegratedPeers = new HashMap(16);
+        _fastPeers = new HashMap(32);
+        _highCapacityPeers = new HashMap(64);
+        _wellIntegratedPeers = new HashMap(128);
         _notFailingPeers = new HashMap(256);
         _notFailingPeersList = new ArrayList(256);
         _failingPeers = new HashMap(16);
         _strictCapacityOrder = new TreeSet(_comp);
         _persistenceHelper = new ProfilePersistenceHelper(_context);

-        _context.statManager().createRateStat("peer.profileSortTime", "How long the reorg takes sorting peers", "Peers", new long[] { 10*60*1000 });
-        _context.statManager().createRateStat("peer.profileCoalesceTime", "How long the reorg takes coalescing peer stats", "Peers", new long[] { 10*60*1000 });
-        _context.statManager().createRateStat("peer.profileThresholdTime", "How long the reorg takes determining the tier thresholds", "Peers", new long[] { 10*60*1000 });
-        _context.statManager().createRateStat("peer.profilePlaceTime", "How long the reorg takes placing peers in the tiers", "Peers", new long[] { 10*60*1000 });
-        _context.statManager().createRateStat("peer.profileReorgTime", "How long the reorg takes overall", "Peers", new long[] { 10*60*1000 });
+        _context.statManager().createRateStat("peer.profileSortTime", "How long the reorg takes sorting peers", "Peers", new long[] { 60*60*1000 });
+        _context.statManager().createRateStat("peer.profileCoalesceTime", "How long the reorg takes coalescing peer stats", "Peers", new long[] { 60*60*1000 });
+        _context.statManager().createRateStat("peer.profileThresholdTime", "How long the reorg takes determining the tier thresholds", "Peers", new long[] { 60*60*1000 });
+        _context.statManager().createRateStat("peer.profilePlaceTime", "How long the reorg takes placing peers in the tiers", "Peers", new long[] { 60*60*1000 });
+        _context.statManager().createRateStat("peer.profileReorgTime", "How long the reorg takes overall", "Peers", new long[] { 60*60*1000 });
         // used in DBHistory
         _context.statManager().createRequiredRateStat("peer.failedLookupRate", "Net DB Lookup fail rate", "Peers", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
     }
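The one-boolean lock change is the performance-sensitive part: a fair ReentrantReadWriteLock hands itself off in strict arrival order, so every peer-selection read can stall behind a queued reorg writer, while non-fair mode lets readers barge, which suits a read-heavy, briefly write-held lock. The widened rate-stat windows (10 minutes to an hour) presumably just match the slower reorg cadence. A minimal sketch of the fairness flag and the usage pattern it serves; the maps and method names are stand-ins, not ProfileOrganizer's:

    import java.util.Map;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class LockFairnessDemo {
        // false = non-fair (the new setting); true = fair FIFO handoff (the old one)
        private final ReentrantReadWriteLock reorganizeLock = new ReentrantReadWriteLock(false);

        double readCapacity(Map<String, Double> tiers, String peer) {
            reorganizeLock.readLock().lock();      // many concurrent readers allowed
            try {
                return tiers.getOrDefault(peer, 0.0);
            } finally {
                reorganizeLock.readLock().unlock();
            }
        }

        void reorganize(Map<String, Double> tiers) {
            reorganizeLock.writeLock().lock();     // exclusive while re-tiering
            try {
                tiers.replaceAll((p, cap) -> cap * 0.99);   // stand-in for re-tiering work
            } finally {
                reorganizeLock.writeLock().unlock();
            }
        }
    }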
@@ -162,17 +162,34 @@ public class ProfileOrganizer {
     *
     */
    public PeerProfile addProfile(PeerProfile profile) {
-        if ( (profile == null) || (profile.getPeer() == null) ) return null;
+        Hash peer = profile.getPeer();
+        if (profile == null || peer == null) return null;

         if (_log.shouldLog(Log.DEBUG))
-            _log.debug("New profile created for " + profile.getPeer().toBase64());
+            _log.debug("New profile created for " + peer);

-        PeerProfile old = getProfile(profile.getPeer());
+        PeerProfile old = getProfile(peer);
         profile.coalesceStats();
         if (!getWriteLock())
             return old;
         try {
-            locked_placeProfile(profile);
+            // Don't do this, as it may substantially exceed
+            // the high cap and fast limits in-between reorganizations.
+            // just add to the not-failing tier, and maybe the high cap tier,
+            // it will get promoted in the next reorganization
+            // if appropriate. This lessens high-cap churn.
+            // The downside is that new peers don't become high cap until the next reorg
+            // if we are at our limit.
+            //locked_placeProfile(profile);
+            _notFailingPeers.put(peer, profile);
+            if (old == null)
+                _notFailingPeersList.add(peer);
+            // Add to high cap only if we have room. Don't add to Fast; wait for reorg.
+            if (_thresholdCapacityValue <= profile.getCapacityValue() &&
+                isSelectable(peer) &&
+                _highCapacityPeers.size() < getMaximumHighCapPeers()) {
+                _highCapacityPeers.put(peer, profile);
+            }
             _strictCapacityOrder.add(profile);
         } finally { releaseWriteLock(); }
         return old;
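Two things worth noting in the new addProfile(): profile.getPeer() is now dereferenced before the null check on profile, so the profile == null clause is effectively dead (callers evidently never pass null), and the whole insertion sits inside the getWriteLock()/releaseWriteLock() pair guarding the _reorganizeLock above. A hedged sketch of such a bounded guard; the timeout, message, and class are assumptions, not ProfileOrganizer's exact code:

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class WriteLockGuard {
        private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(false);

        // Bounded acquire so a wedged reorganization cannot hang profile
        // insertion forever; callers fall back gracefully when this fails.
        boolean getWriteLock() {
            try {
                boolean ok = lock.writeLock().tryLock(3000, TimeUnit.MILLISECONDS);
                if (!ok)
                    System.err.println("no lock, waiters: " + lock.getQueueLength());
                return ok;
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt();
                return false;
            }
        }

        void releaseWriteLock() {
            lock.writeLock().unlock();
        }
    }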
@@ -721,6 +738,7 @@ public class ProfileOrganizer {
         long thresholdTime = 0;
         long placeTime = 0;
         int profileCount = 0;
+        int expiredCount = 0;

         long uptime = _context.router().getUptime();
         long expireOlderThan = -1;
@@ -748,8 +766,11 @@
             long sortStart = System.currentTimeMillis();
             for (Iterator<PeerProfile> iter = _strictCapacityOrder.iterator(); iter.hasNext(); ) {
                 PeerProfile prof = iter.next();
-                if ( (expireOlderThan > 0) && (prof.getLastSendSuccessful() <= expireOlderThan) )
+                if ( (expireOlderThan > 0) && (prof.getLastSendSuccessful() <= expireOlderThan) ) {
+                    expiredCount++;
                     continue; // drop, but no need to delete, since we don't periodically reread
+                    // TODO maybe we should delete files, otherwise they are only deleted at restart
+                }

                 if (shouldCoalesce) {
                     long coalesceStart = System.currentTimeMillis();
@@ -775,8 +796,7 @@

             long placeStart = System.currentTimeMillis();

-            for (Iterator<PeerProfile> iter = allPeers.iterator(); iter.hasNext(); ) {
-                PeerProfile profile = iter.next();
+            for (PeerProfile profile : _strictCapacityOrder) {
                 locked_placeProfile(profile);
             }

@@ -792,7 +812,8 @@


             if (_log.shouldLog(Log.INFO))
-                _log.info("Profiles reorganized. averages: [integration: " + _thresholdIntegrationValue
+                _log.info("Profiles reorganized. Expired: " + expiredCount
+                          + " Averages: [integration: " + _thresholdIntegrationValue
                           + ", capacity: " + _thresholdCapacityValue + ", speed: " + _thresholdSpeedValue + "]");
             /*****
             if (_log.shouldLog(Log.DEBUG)) {
@@ -889,8 +910,6 @@
         int maxHighCapPeers = getMaximumHighCapPeers();
         int numToDemote = _highCapacityPeers.size() - maxHighCapPeers;
         if (numToDemote > 0) {
-            if (_log.shouldLog(Log.INFO))
-                _log.info("Need to explicitly demote " + numToDemote + " peers from the high cap group");
             // sorted by capacity, highest-first
             Iterator<PeerProfile> iter = _strictCapacityOrder.iterator();
             for (int i = 0; iter.hasNext() && i < maxHighCapPeers; ) {
@@ -904,11 +923,14 @@
                     i++;
                 }
             }
+            if (_log.shouldLog(Log.INFO))
+                _log.info("Demoted " + numToDemote + " peers from high cap, size now " + _highCapacityPeers.size());
         }
     }

     /** how many not failing/active peers must we have? */
     private final static int MIN_NOT_FAILING_ACTIVE = 3;

     /**
      * I'm not sure how much I dislike the following - if there aren't enough
      * active and not-failing peers, pick the most reliable active peers and
@@ -917,9 +939,7 @@
      */
     private void locked_unfailAsNecessary() {
         int notFailingActive = 0;
-        for (Iterator<Hash> iter = _notFailingPeers.keySet().iterator(); iter.hasNext(); ) {
-            Hash key = iter.next();
-            PeerProfile peer = _notFailingPeers.get(key);
+        for (PeerProfile peer : _notFailingPeers.values()) {
             if (peer.getIsActive())
                 notFailingActive++;
             if (notFailingActive >= MIN_NOT_FAILING_ACTIVE) {
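The cleanup here replaces a keySet() walk plus a per-key get() with direct iteration over values(), dropping one hash lookup per peer. The same shape in miniature, with invented names:

    import java.util.HashMap;
    import java.util.Map;

    public class ValuesIterationDemo {
        public static void main(String[] args) {
            Map<String, Integer> notFailingPeers = new HashMap<>();
            notFailingPeers.put("peerA", 3);
            notFailingPeers.put("peerB", 0);

            // Before: iterate keys, then look each value up again.
            int active = 0;
            for (String key : notFailingPeers.keySet()) {
                Integer tunnels = notFailingPeers.get(key);   // redundant lookup
                if (tunnels > 0)
                    active++;
            }

            // After: one pass over the values, no second lookup.
            int active2 = 0;
            for (Integer tunnels : notFailingPeers.values()) {
                if (tunnels > 0)
                    active2++;
            }
            System.out.println(active + " " + active2);   // 1 1
        }
    }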