diff --git a/apps/routerconsole/java/src/net/i2p/router/web/NewsFetcher.java b/apps/routerconsole/java/src/net/i2p/router/web/NewsFetcher.java
index 66841ada9..4bd776e45 100644
--- a/apps/routerconsole/java/src/net/i2p/router/web/NewsFetcher.java
+++ b/apps/routerconsole/java/src/net/i2p/router/web/NewsFetcher.java
@@ -11,6 +11,7 @@ import net.i2p.data.DataHelper;
import net.i2p.router.RouterContext;
import net.i2p.router.RouterVersion;
import net.i2p.util.EepGet;
+import net.i2p.util.FileUtil;
import net.i2p.util.Log;
/**
@@ -26,6 +27,7 @@ public class NewsFetcher implements Runnable, EepGet.StatusListener {
public static final NewsFetcher getInstance() { return _instance; }
private static final String NEWS_FILE = "docs/news.xml";
+ private static final String TEMP_NEWS_FILE = "docs/news.xml.temp";
public NewsFetcher(I2PAppContext ctx) {
_context = ctx;
@@ -82,14 +84,18 @@ public class NewsFetcher implements Runnable, EepGet.StatusListener {
boolean shouldProxy = Boolean.valueOf(_context.getProperty(ConfigUpdateHandler.PROP_SHOULD_PROXY, ConfigUpdateHandler.DEFAULT_SHOULD_PROXY)).booleanValue();
String proxyHost = _context.getProperty(ConfigUpdateHandler.PROP_PROXY_HOST, ConfigUpdateHandler.DEFAULT_PROXY_HOST);
String port = _context.getProperty(ConfigUpdateHandler.PROP_PROXY_PORT, ConfigUpdateHandler.DEFAULT_PROXY_PORT);
+ File tempFile = new File(TEMP_NEWS_FILE);
+ if (tempFile.exists())
+ tempFile.delete();
+
int proxyPort = -1;
try {
proxyPort = Integer.parseInt(port);
EepGet get = null;
if (shouldProxy)
- get = new EepGet(_context, proxyHost, proxyPort, 10, NEWS_FILE, newsURL);
+ get = new EepGet(_context, proxyHost, proxyPort, 10, TEMP_NEWS_FILE, newsURL);
else
- get = new EepGet(_context, 10, NEWS_FILE, newsURL);
+ get = new EepGet(_context, 10, TEMP_NEWS_FILE, newsURL);
get.addStatusListener(this);
get.fetch();
} catch (Throwable t) {
@@ -230,11 +236,20 @@ public class NewsFetcher implements Runnable, EepGet.StatusListener {
public void transferComplete(long alreadyTransferred, long bytesTransferred, long bytesRemaining, String url, String outputFile) {
if (_log.shouldLog(Log.INFO))
_log.info("News fetched from " + url);
+
+ File temp = new File(TEMP_NEWS_FILE);
+ if (temp.exists()) {
+ boolean copied = FileUtil.copy(TEMP_NEWS_FILE, NEWS_FILE, true);
+ if (copied)
+ temp.delete();
+ }
checkForUpdates();
}
public void transferFailed(String url, long bytesTransferred, long bytesRemaining, int currentAttempt) {
if (_log.shouldLog(Log.ERROR))
_log.error("Failed to fetch the news from " + url);
+ File temp = new File(TEMP_NEWS_FILE);
+ temp.delete();
}
}
diff --git a/apps/streaming/java/src/net/i2p/client/streaming/Connection.java b/apps/streaming/java/src/net/i2p/client/streaming/Connection.java
index b904c3742..cbf9fd0bb 100644
--- a/apps/streaming/java/src/net/i2p/client/streaming/Connection.java
+++ b/apps/streaming/java/src/net/i2p/client/streaming/Connection.java
@@ -71,7 +71,7 @@ public class Connection {
private long _lifetimeDupMessageSent;
private long _lifetimeDupMessageReceived;
- public static final long MAX_RESEND_DELAY = 30*1000;
+ public static final long MAX_RESEND_DELAY = 20*1000;
public static final long MIN_RESEND_DELAY = 10*1000;
/** wait up to 5 minutes after disconnection so we can ack/close packets */
diff --git a/apps/streaming/java/src/net/i2p/client/streaming/ConnectionOptions.java b/apps/streaming/java/src/net/i2p/client/streaming/ConnectionOptions.java
index 1adf4a3ce..988723730 100644
--- a/apps/streaming/java/src/net/i2p/client/streaming/ConnectionOptions.java
+++ b/apps/streaming/java/src/net/i2p/client/streaming/ConnectionOptions.java
@@ -82,7 +82,7 @@ public class ConnectionOptions extends I2PSocketOptionsImpl {
setConnectDelay(getInt(opts, PROP_CONNECT_DELAY, -1));
setProfile(getInt(opts, PROP_PROFILE, PROFILE_BULK));
setMaxMessageSize(getInt(opts, PROP_MAX_MESSAGE_SIZE, 4*1024));
- setRTT(getInt(opts, PROP_INITIAL_RTT, 30*1000));
+ setRTT(getInt(opts, PROP_INITIAL_RTT, 10*1000));
setReceiveWindow(getInt(opts, PROP_INITIAL_RECEIVE_WINDOW, 1));
setResendDelay(getInt(opts, PROP_INITIAL_RESEND_DELAY, 1000));
setSendAckDelay(getInt(opts, PROP_INITIAL_ACK_DELAY, 1000));
@@ -107,7 +107,7 @@ public class ConnectionOptions extends I2PSocketOptionsImpl {
if (opts.containsKey(PROP_MAX_MESSAGE_SIZE))
setMaxMessageSize(getInt(opts, PROP_MAX_MESSAGE_SIZE, Packet.MAX_PAYLOAD_SIZE));
if (opts.containsKey(PROP_INITIAL_RTT))
- setRTT(getInt(opts, PROP_INITIAL_RTT, 30*1000));
+ setRTT(getInt(opts, PROP_INITIAL_RTT, 10*1000));
if (opts.containsKey(PROP_INITIAL_RECEIVE_WINDOW))
setReceiveWindow(getInt(opts, PROP_INITIAL_RECEIVE_WINDOW, 1));
if (opts.containsKey(PROP_INITIAL_RESEND_DELAY))
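A rough sketch of why the smaller initial RTT estimate "allows more retries" (per the changelog later in this patch): assuming, purely for illustration, that the resend delay roughly doubles on each retransmission and is clamped between MIN_RESEND_DELAY and MAX_RESEND_DELAY, a 45-second window fits more attempts starting from a 10s estimate (capped at the new 20s MAX_RESEND_DELAY) than from a 30s one. The backoff rule, the window, and the class name below are assumptions for illustration only, not the actual streaming-lib math.

    // Hedged illustration: doubling backoff clamped to [min,max]; not the real retransmission logic.
    public class ResendCountSketch {
        static int attempts(long initialRtt, long minDelay, long maxDelay, long window) {
            long delay = Math.min(Math.max(initialRtt, minDelay), maxDelay);
            long elapsed = 0;
            int count = 0;
            while (elapsed + delay <= window) {
                elapsed += delay;
                count++;
                delay = Math.min(delay * 2, maxDelay); // assumed exponential backoff, clamped
            }
            return count;
        }
        public static void main(String args[]) {
            System.out.println("old (30s RTT, 30s cap): " + attempts(30*1000, 10*1000, 30*1000, 45*1000)); // 1
            System.out.println("new (10s RTT, 20s cap): " + attempts(10*1000, 10*1000, 20*1000, 45*1000)); // 2
        }
    }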
diff --git a/core/java/src/net/i2p/CoreVersion.java b/core/java/src/net/i2p/CoreVersion.java
index 96ffdd9df..6d1f72c7d 100644
--- a/core/java/src/net/i2p/CoreVersion.java
+++ b/core/java/src/net/i2p/CoreVersion.java
@@ -14,8 +14,8 @@ package net.i2p;
*
*/
public class CoreVersion {
- public final static String ID = "$Revision: 1.31 $ $Date: 2005/03/18 17:34:53 $";
- public final static String VERSION = "0.5.0.4";
+ public final static String ID = "$Revision: 1.32 $ $Date: 2005/03/24 02:29:28 $";
+ public final static String VERSION = "0.5.0.5";
public static void main(String args[]) {
System.out.println("I2P Core version: " + VERSION);
diff --git a/core/java/src/net/i2p/crypto/HMACSHA256Generator.java b/core/java/src/net/i2p/crypto/HMACSHA256Generator.java
index 920cce5dd..be28c2355 100644
--- a/core/java/src/net/i2p/crypto/HMACSHA256Generator.java
+++ b/core/java/src/net/i2p/crypto/HMACSHA256Generator.java
@@ -1,33 +1,101 @@
package net.i2p.crypto;
+import java.util.Arrays;
import net.i2p.I2PAppContext;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.data.SessionKey;
/**
- * Calculate the HMAC-SHA256 of a key+message. Currently FAKE - returns a stupid
- * kludgy hash: H(H(key) XOR H(data)). Fix me!
+ * Calculate the HMAC-SHA256 of a key+message.
*
*/
public class HMACSHA256Generator {
- public HMACSHA256Generator(I2PAppContext context) { // nop
+ private I2PAppContext _context;
+ public HMACSHA256Generator(I2PAppContext context) {
+ _context = context;
}
public static HMACSHA256Generator getInstance() {
return I2PAppContext.getGlobalContext().hmac();
}
+ private static final int PAD_LENGTH = 64;
+
+ private static final byte[] _IPAD = new byte[PAD_LENGTH];
+ private static final byte[] _OPAD = new byte[PAD_LENGTH];
+ static {
+ for (int i = 0; i < _IPAD.length; i++) {
+ _IPAD[i] = 0x36;
+ _OPAD[i] = 0x5C;
+ }
+ }
+
+
+ public Buffer createBuffer(int dataLen) { return new Buffer(dataLen); }
+
+ public class Buffer {
+ private byte padded[];
+ private byte innerBuf[];
+ private SHA256EntryCache.CacheEntry innerEntry;
+ private byte rv[];
+ private byte outerBuf[];
+ private SHA256EntryCache.CacheEntry outerEntry;
+
+ public Buffer(int dataLength) {
+ padded = new byte[PAD_LENGTH];
+ innerBuf = new byte[dataLength + PAD_LENGTH];
+ innerEntry = _context.sha().cache().acquire(innerBuf.length);
+ rv = new byte[Hash.HASH_LENGTH];
+ outerBuf = new byte[Hash.HASH_LENGTH + PAD_LENGTH];
+ outerEntry = _context.sha().cache().acquire(outerBuf.length);
+ }
+
+ public void releaseCached() {
+ _context.sha().cache().release(innerEntry);
+ _context.sha().cache().release(outerEntry);
+ }
+
+ public byte[] getHash() { return rv; }
+ }
+
/**
- * This should calculate the HMAC/SHA256, but it DOESNT. Its just a kludge.
- * Fix me.
+ * Calculate the HMAC of the data with the given key
*/
public Hash calculate(SessionKey key, byte data[]) {
if ((key == null) || (key.getData() == null) || (data == null))
throw new NullPointerException("Null arguments for HMAC");
-
- Hash hkey = SHA256Generator.getInstance().calculateHash(key.getData());
- Hash hdata = SHA256Generator.getInstance().calculateHash(data);
- return SHA256Generator.getInstance().calculateHash(DataHelper.xor(hkey.getData(), hdata.getData()));
+
+ Buffer buf = new Buffer(data.length);
+ calculate(key, data, buf);
+ Hash rv = new Hash(buf.rv);
+ buf.releaseCached();
+ return rv;
+ }
+
+ /**
+ * Calculate the HMAC of the data with the given key
+ */
+ public void calculate(SessionKey key, byte data[], Buffer buf) {
+ // inner hash
+ padKey(key.getData(), _IPAD, buf.padded);
+ System.arraycopy(buf.padded, 0, buf.innerBuf, 0, PAD_LENGTH);
+ System.arraycopy(data, 0, buf.innerBuf, PAD_LENGTH, data.length);
+
+ Hash h = _context.sha().calculateHash(buf.innerBuf, buf.innerEntry);
+
+ // outer hash
+ padKey(key.getData(), _OPAD, buf.padded);
+ System.arraycopy(buf.padded, 0, buf.outerBuf, 0, PAD_LENGTH);
+ System.arraycopy(h.getData(), 0, buf.outerBuf, PAD_LENGTH, Hash.HASH_LENGTH);
+
+ h = _context.sha().calculateHash(buf.outerBuf, buf.outerEntry);
+ System.arraycopy(h.getData(), 0, buf.rv, 0, Hash.HASH_LENGTH);
+ }
+
+ private static final void padKey(byte key[], byte pad[], byte out[]) {
+ for (int i = 0; i < SessionKey.KEYSIZE_BYTES; i++)
+ out[i] = (byte) (key[i] ^ pad[i]);
+ Arrays.fill(out, SessionKey.KEYSIZE_BYTES, PAD_LENGTH, pad[0]);
}
}
\ No newline at end of file
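The rewritten generator above is the standard inner/outer-pad HMAC construction: for a 32-byte session key, padKey() zero-extends the key to the 64-byte SHA-256 block and XORs it with 0x36 (inner) or 0x5C (outer), so for these keys the output should agree with a stock HMAC-SHA256. A minimal cross-check sketch, assuming a JRE whose JCE provider ships the "HmacSHA256" Mac (not something this patch provides):

    import javax.crypto.Mac;
    import javax.crypto.spec.SecretKeySpec;
    import net.i2p.I2PAppContext;
    import net.i2p.data.SessionKey;

    public class HMACCrossCheck {
        public static void main(String args[]) throws Exception {
            I2PAppContext ctx = I2PAppContext.getGlobalContext();
            SessionKey key = ctx.keyGenerator().generateSessionKey();
            byte data[] = "qwerty".getBytes();

            // I2P's implementation from this patch
            byte ours[] = ctx.hmac().calculate(key, data).getData();

            // JCE reference implementation
            Mac mac = Mac.getInstance("HmacSHA256");
            mac.init(new SecretKeySpec(key.getData(), "HmacSHA256"));
            byte theirs[] = mac.doFinal(data);

            System.out.println("match: " + java.util.Arrays.equals(ours, theirs));
        }
    }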
diff --git a/core/java/src/net/i2p/data/Base64.java b/core/java/src/net/i2p/data/Base64.java
index 85bbd2f28..554d5ab92 100644
--- a/core/java/src/net/i2p/data/Base64.java
+++ b/core/java/src/net/i2p/data/Base64.java
@@ -329,6 +329,8 @@ public class Base64 {
* replacing / with ~, and + with -
*/
private static String safeEncode(byte[] source, int off, int len, boolean useStandardAlphabet) {
+ if (len + off > source.length)
+ throw new ArrayIndexOutOfBoundsException("Trying to encode too much! source.len=" + source.length + " off=" + off + " len=" + len);
String encoded = encodeBytes(source, off, len, false);
if (useStandardAlphabet) {
// noop
diff --git a/core/java/src/net/i2p/data/LeaseSet.java b/core/java/src/net/i2p/data/LeaseSet.java
index 214b7df7e..f2f54aab8 100644
--- a/core/java/src/net/i2p/data/LeaseSet.java
+++ b/core/java/src/net/i2p/data/LeaseSet.java
@@ -35,6 +35,7 @@ public class LeaseSet extends DataStructureImpl {
private Signature _signature;
private volatile Hash _currentRoutingKey;
private volatile byte[] _routingKeyGenMod;
+ private boolean _receivedAsPublished;
/** um, no lease can last more than a year. */
private final static long MAX_FUTURE_EXPIRATION = 365 * 24 * 60 * 60 * 1000L;
@@ -47,6 +48,7 @@ public class LeaseSet extends DataStructureImpl {
setRoutingKey(null);
_leases = new ArrayList();
_routingKeyGenMod = null;
+ _receivedAsPublished = false;
}
public Destination getDestination() {
@@ -72,6 +74,14 @@ public class LeaseSet extends DataStructureImpl {
public void setSigningKey(SigningPublicKey key) {
_signingKey = key;
}
+
+ /**
+ * If true, we received this LeaseSet because a remote peer published it to
+ * us, rather than by searching for it ourselves or creating it locally.
+ *
+ */
+ public boolean getReceivedAsPublished() { return _receivedAsPublished; }
+ public void setReceivedAsPublished(boolean received) { _receivedAsPublished = received; }
public void addLease(Lease lease) {
if (lease == null) throw new IllegalArgumentException("erm, null lease");
diff --git a/core/java/test/net/i2p/crypto/HMACSHA256Bench.java b/core/java/test/net/i2p/crypto/HMACSHA256Bench.java
new file mode 100644
index 000000000..715c45ec0
--- /dev/null
+++ b/core/java/test/net/i2p/crypto/HMACSHA256Bench.java
@@ -0,0 +1,112 @@
+package net.i2p.crypto;
+
+/*
+ * Copyright (c) 2003, TheCrypto
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * - Neither the name of the TheCrypto may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+import net.i2p.I2PAppContext;
+import net.i2p.data.Hash;
+import net.i2p.data.SessionKey;
+
+public class HMACSHA256Bench {
+ public static void main(String args[]) {
+ I2PAppContext ctx = I2PAppContext.getGlobalContext();
+ SessionKey key = ctx.keyGenerator().generateSessionKey();
+ Hash asdfs = HMACSHA256Generator.getInstance().calculate(key, "qwerty".getBytes());
+
+ int times = 100000;
+ long shorttime = 0;
+ long medtime = 0;
+ long longtime = 0;
+ long minShort = 0;
+ long maxShort = 0;
+ long minMed = 0;
+ long maxMed = 0;
+ long minLong = 0;
+ long maxLong = 0;
+
+ long shorttime1 = 0;
+ long medtime1 = 0;
+ long longtime1 = 0;
+ long minShort1 = 0;
+ long maxShort1 = 0;
+ long minMed1 = 0;
+ long maxMed1 = 0;
+ long minLong1 = 0;
+ long maxLong1 = 0;
+
+ byte[] smess = new String("abc").getBytes();
+ StringBuffer buf = new StringBuffer();
+ for (int x = 0; x < 2*1024; x++) {
+ buf.append("a");
+ }
+ byte[] mmess = buf.toString().getBytes(); // new String("abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq").getBytes();
+ buf = new StringBuffer();
+ for (int x = 0; x < 10000; x++) {
+ buf.append("a");
+ }
+ byte[] lmess = buf.toString().getBytes();
+
+ HMACSHA256Generator.Buffer sbuf = ctx.hmac().createBuffer(smess.length);
+ HMACSHA256Generator.Buffer mbuf = ctx.hmac().createBuffer(mmess.length);
+ HMACSHA256Generator.Buffer lbuf = ctx.hmac().createBuffer(lmess.length);
+
+ // warm up the engines
+ ctx.hmac().calculate(key, smess, sbuf);
+ ctx.hmac().calculate(key, mmess, mbuf);
+ ctx.hmac().calculate(key, lmess, lbuf);
+
+ long before = System.currentTimeMillis();
+ for (int x = 0; x < times; x++)
+ ctx.hmac().calculate(key, smess, sbuf);
+ long after = System.currentTimeMillis();
+
+ display(times, before, after, smess.length, "3 byte");
+
+ before = System.currentTimeMillis();
+ for (int x = 0; x < times; x++)
+ ctx.hmac().calculate(key, mmess, mbuf);
+ after = System.currentTimeMillis();
+
+ display(times, before, after, mmess.length, "2KB");
+
+ before = System.currentTimeMillis();
+ for (int x = 0; x < times; x++)
+ ctx.hmac().calculate(key, lmess, lbuf);
+ after = System.currentTimeMillis();
+
+ display(times, before, after, lmess.length, "10KB");
+ }
+
+ private static void display(int times, long before, long after, int len, String name) {
+ double rate = ((double)times)/(((double)after-(double)before)/1000.0d);
+ double kbps = ((double)len/1024.0d)*((double)times)/(((double)after-(double)before)/1000.0d);
+ System.out.println(name + " HMAC-SHA256 pulled " + kbps + "KBps or " + rate + " calcs per second");
+ }
+}
+
diff --git a/history.txt b/history.txt
index 5af5f9ff1..b7f507747 100644
--- a/history.txt
+++ b/history.txt
@@ -1,4 +1,25 @@
-$Id: history.txt,v 1.181 2005/03/24 23:07:06 jrandom Exp $
+$Id: history.txt,v 1.182 2005/03/26 02:13:38 jrandom Exp $
+
+* 2005-03-29 0.5.0.5 released
+
+2005-03-29 jrandom
+ * Decreased the initial RTT estimate to 10s to allow more retries.
+ * Increased the default netDb store replication factor from 2 to 6 to account
+ for tunnel failures.
+ * Address some statistical anonymity attacks against the netDb that an active
+ internal adversary could mount, by only answering lookups for leaseSets we
+ received through an unsolicited store.
+ * Don't throttle lookup responses (we throttle enough elsewhere)
+ * Fix the NewsFetcher so that it doesn't incorrectly resume midway through
+ the file (thanks nickster!)
+ * Updated the I2PTunnel HTML (thanks postman!)
+ * Added support to the I2PTunnel pages for the URL parameter "passphrase",
+ which, if matched against the router.config "i2ptunnel.passphrase" value,
+ skips the nonce check. If the config prop doesn't exist or is blank, no
+ passphrase is accepted.
+ * Implemented HMAC-SHA256.
+ * Enable the tunnel batching with a 500ms delay by default
+ * Dropped compatibility with 0.5.0.3 and earlier releases
2005-03-26 jrandom
* Added some error handling and fairly safe to cache data to the streaming
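The I2PTunnel "passphrase" entry in the changelog above describes the rule, but the servlet changes themselves are not part of this diff, so the helper below is hypothetical; it only restates the documented behavior: a non-blank "i2ptunnel.passphrase" config value that equals the request's "passphrase" parameter lets the caller skip the nonce check, and a missing or blank config value never does.

    // Hypothetical sketch of the documented passphrase rule; not the actual I2PTunnel code.
    import net.i2p.I2PAppContext;

    public class PassphraseCheckSketch {
        public static boolean allowsSkippingNonce(I2PAppContext ctx, String passphraseParam) {
            String configured = ctx.getProperty("i2ptunnel.passphrase");
            if (configured == null || configured.trim().length() <= 0)
                return false; // missing or blank config value: never accept a passphrase
            return configured.equals(passphraseParam);
        }
    }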
diff --git a/installer/install.xml b/installer/install.xml
index b515ebcec..d8306c253 100644
--- a/installer/install.xml
+++ b/installer/install.xml
@@ -4,7 +4,7 @@
i2p
- 0.5.0.4
+ 0.5.0.5
diff --git a/router/java/src/net/i2p/router/RouterVersion.java b/router/java/src/net/i2p/router/RouterVersion.java
index 2a93e5091..d8a40488c 100644
--- a/router/java/src/net/i2p/router/RouterVersion.java
+++ b/router/java/src/net/i2p/router/RouterVersion.java
@@ -15,9 +15,9 @@ import net.i2p.CoreVersion;
*
*/
public class RouterVersion {
- public final static String ID = "$Revision: 1.174 $ $Date: 2005/03/24 23:07:06 $";
- public final static String VERSION = "0.5.0.4";
- public final static long BUILD = 2;
+ public final static String ID = "$Revision: 1.175 $ $Date: 2005/03/26 02:13:38 $";
+ public final static String VERSION = "0.5.0.5";
+ public final static long BUILD = 0;
public static void main(String args[]) {
System.out.println("I2P Router version: " + VERSION);
System.out.println("Router ID: " + RouterVersion.ID);
diff --git a/router/java/src/net/i2p/router/StatisticsManager.java b/router/java/src/net/i2p/router/StatisticsManager.java
index 10152286c..e3983f655 100644
--- a/router/java/src/net/i2p/router/StatisticsManager.java
+++ b/router/java/src/net/i2p/router/StatisticsManager.java
@@ -113,8 +113,9 @@ public class StatisticsManager implements Service {
includeRate("tunnel.buildFailure", stats, new long[] { 60*60*1000 });
includeRate("tunnel.buildSuccess", stats, new long[] { 60*60*1000 });
- includeRate("tunnel.batchDelaySent", stats, new long[] { 10*60*1000, 60*60*1000 });
+ //includeRate("tunnel.batchDelaySent", stats, new long[] { 10*60*1000, 60*60*1000 });
includeRate("tunnel.batchMultipleCount", stats, new long[] { 10*60*1000, 60*60*1000 });
+ includeRate("tunnel.corruptMessage", stats, new long[] { 60*60*1000l, 3*60*60*1000l });
includeRate("router.throttleTunnelProbTestSlow", stats, new long[] { 60*60*1000 });
includeRate("router.throttleTunnelProbTooFast", stats, new long[] { 60*60*1000 });
diff --git a/router/java/src/net/i2p/router/message/OutboundClientMessageOneShotJob.java b/router/java/src/net/i2p/router/message/OutboundClientMessageOneShotJob.java
index b10f06cf8..73af5c9e3 100644
--- a/router/java/src/net/i2p/router/message/OutboundClientMessageOneShotJob.java
+++ b/router/java/src/net/i2p/router/message/OutboundClientMessageOneShotJob.java
@@ -418,7 +418,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
long sendTime = getContext().clock().now() - _start;
if (_log.shouldLog(Log.WARN))
_log.warn(getJobId() + ": Failed to send the message " + _clientMessageId + " after "
- + sendTime + "ms", new Exception("Message send failure"));
+ + sendTime + "ms");
long messageDelay = getContext().throttle().getMessageDelay();
long tunnelLag = getContext().throttle().getTunnelLag();
diff --git a/router/java/src/net/i2p/router/networkdb/DatabaseLookupMessageHandler.java b/router/java/src/net/i2p/router/networkdb/DatabaseLookupMessageHandler.java
index 29ee4f3fb..db696461a 100644
--- a/router/java/src/net/i2p/router/networkdb/DatabaseLookupMessageHandler.java
+++ b/router/java/src/net/i2p/router/networkdb/DatabaseLookupMessageHandler.java
@@ -34,7 +34,7 @@ public class DatabaseLookupMessageHandler implements HandlerJobBuilder {
public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash) {
_context.statManager().addRateData("netDb.lookupsReceived", 1, 0);
- if (_context.throttle().acceptNetDbLookupRequest(((DatabaseLookupMessage)receivedMessage).getSearchKey())) {
+ if (true || _context.throttle().acceptNetDbLookupRequest(((DatabaseLookupMessage)receivedMessage).getSearchKey())) {
return new HandleDatabaseLookupMessageJob(_context, (DatabaseLookupMessage)receivedMessage, from, fromHash);
} else {
if (_log.shouldLog(Log.INFO))
diff --git a/router/java/src/net/i2p/router/networkdb/HandleDatabaseLookupMessageJob.java b/router/java/src/net/i2p/router/networkdb/HandleDatabaseLookupMessageJob.java
index 9e69e7254..7de9ec659 100644
--- a/router/java/src/net/i2p/router/networkdb/HandleDatabaseLookupMessageJob.java
+++ b/router/java/src/net/i2p/router/networkdb/HandleDatabaseLookupMessageJob.java
@@ -40,6 +40,7 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
private RouterIdentity _from;
private Hash _fromHash;
private final static int MAX_ROUTERS_RETURNED = 3;
+ private final static int CLOSENESS_THRESHOLD = 10; // StoreJob.REDUNDANCY * 2
private final static int REPLY_TIMEOUT = 60*1000;
private final static int MESSAGE_PRIORITY = 300;
@@ -48,6 +49,9 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
_log = getContext().logManager().getLog(HandleDatabaseLookupMessageJob.class);
getContext().statManager().createRateStat("netDb.lookupsHandled", "How many netDb lookups have we handled?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.lookupsMatched", "How many netDb lookups did we have the data for?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+ getContext().statManager().createRateStat("netDb.lookupsMatchedReceivedPublished", "How many netDb lookups did we have the data for that were published to us?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+ getContext().statManager().createRateStat("netDb.lookupsMatchedLocalClosest", "How many netDb lookups for local data were received where we are the closest peers?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+ getContext().statManager().createRateStat("netDb.lookupsMatchedLocalNotClosest", "How many netDb lookups for local data were received where we are NOT the closest peers?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_message = receivedMessage;
_from = from;
_fromHash = fromHash;
@@ -65,26 +69,26 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
+ " (tunnel " + _message.getReplyTunnel() + ")");
}
- if (getContext().netDb().lookupRouterInfoLocally(_message.getFrom()) == null) {
- // hmm, perhaps don't always send a lookup for this...
- // but for now, wtf, why not. we may even want to adjust it so that
- // we penalize or benefit peers who send us that which we can or
- // cannot lookup
- getContext().netDb().lookupRouterInfo(_message.getFrom(), null, null, REPLY_TIMEOUT);
- }
-
- // whatdotheywant?
- handleRequest(fromKey);
- }
-
- private void handleRequest(Hash fromKey) {
LeaseSet ls = getContext().netDb().lookupLeaseSetLocally(_message.getSearchKey());
if (ls != null) {
- // send that lease set to the _message.getFromHash peer
- if (_log.shouldLog(Log.DEBUG))
- _log.debug("We do have key " + _message.getSearchKey().toBase64()
- + " locally as a lease set. sending to " + fromKey.toBase64());
- sendData(_message.getSearchKey(), ls, fromKey, _message.getReplyTunnel());
+ // only answer a request for a LeaseSet if it has been published
+ // to us, or, if it's local, if we would have published to ourselves
+ if (ls.getReceivedAsPublished()) {
+ getContext().statManager().addRateData("netDb.lookupsMatchedReceivedPublished", 1, 0);
+ sendData(_message.getSearchKey(), ls, fromKey, _message.getReplyTunnel());
+ } else {
+ Set routerInfoSet = getContext().netDb().findNearestRouters(_message.getSearchKey(),
+ CLOSENESS_THRESHOLD,
+ _message.getDontIncludePeers());
+ if (getContext().clientManager().isLocal(ls.getDestination()) &&
+ weAreClosest(routerInfoSet)) {
+ getContext().statManager().addRateData("netDb.lookupsMatchedLocalClosest", 1, 0);
+ sendData(_message.getSearchKey(), ls, fromKey, _message.getReplyTunnel());
+ } else {
+ getContext().statManager().addRateData("netDb.lookupsMatchedLocalNotClosest", 1, 0);
+ sendClosest(_message.getSearchKey(), routerInfoSet, fromKey, _message.getReplyTunnel());
+ }
+ }
} else {
RouterInfo info = getContext().netDb().lookupRouterInfoLocally(_message.getSearchKey());
if (info != null) {
@@ -106,6 +110,17 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
}
}
+ private boolean weAreClosest(Set routerInfoSet) {
+ boolean weAreClosest = false;
+ for (Iterator iter = routerInfoSet.iterator(); iter.hasNext(); ) {
+ RouterInfo cur = (RouterInfo)iter.next();
+ if (cur.getIdentity().calculateHash().equals(getContext().routerHash())) {
+ return true;
+ }
+ }
+ return false;
+ }
+
private void sendData(Hash key, DataStructure data, Hash toPeer, TunnelId replyTunnel) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Sending data matching key key " + key.toBase64() + " to peer " + toPeer.toBase64()
@@ -129,27 +144,27 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
_log.debug("Sending closest routers to key " + key.toBase64() + ": # peers = "
+ routerInfoSet.size() + " tunnel " + replyTunnel);
DatabaseSearchReplyMessage msg = new DatabaseSearchReplyMessage(getContext());
- msg.setFromHash(getContext().router().getRouterInfo().getIdentity().getHash());
+ msg.setFromHash(getContext().routerHash());
msg.setSearchKey(key);
for (Iterator iter = routerInfoSet.iterator(); iter.hasNext(); ) {
RouterInfo peer = (RouterInfo)iter.next();
msg.addReply(peer.getIdentity().getHash());
+ if (msg.getNumReplies() >= MAX_ROUTERS_RETURNED)
+ break;
}
getContext().statManager().addRateData("netDb.lookupsHandled", 1, 0);
sendMessage(msg, toPeer, replyTunnel); // should this go via garlic messages instead?
}
private void sendMessage(I2NPMessage message, Hash toPeer, TunnelId replyTunnel) {
- Job send = null;
if (replyTunnel != null) {
sendThroughTunnel(message, toPeer, replyTunnel);
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Sending reply directly to " + toPeer);
- send = new SendMessageDirectJob(getContext(), message, toPeer, REPLY_TIMEOUT, MESSAGE_PRIORITY);
+ Job send = new SendMessageDirectJob(getContext(), message, toPeer, REPLY_TIMEOUT, MESSAGE_PRIORITY);
+ getContext().netDb().lookupRouterInfo(toPeer, send, null, REPLY_TIMEOUT);
}
-
- getContext().netDb().lookupRouterInfo(toPeer, send, null, REPLY_TIMEOUT);
}
private void sendThroughTunnel(I2NPMessage message, Hash toPeer, TunnelId replyTunnel) {
@@ -171,28 +186,6 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
}
}
- private void sendToGateway(I2NPMessage message, Hash toPeer, TunnelId replyTunnel, TunnelInfo info) {
- if (_log.shouldLog(Log.INFO))
- _log.info("Want to reply to a db request via a tunnel, but we're a participant in the reply! so send it to the gateway");
-
- if ( (toPeer == null) || (replyTunnel == null) ) {
- if (_log.shouldLog(Log.ERROR))
- _log.error("Someone br0ke us. where is this message supposed to go again?", getAddedBy());
- return;
- }
-
- long expiration = REPLY_TIMEOUT + getContext().clock().now();
-
- TunnelGatewayMessage msg = new TunnelGatewayMessage(getContext());
- msg.setMessage(message);
- msg.setTunnelId(replyTunnel);
- msg.setMessageExpiration(expiration);
- getContext().jobQueue().addJob(new SendMessageDirectJob(getContext(), msg, toPeer, null, null, null, null, REPLY_TIMEOUT, MESSAGE_PRIORITY));
-
- String bodyType = message.getClass().getName();
- getContext().messageHistory().wrap(bodyType, message.getUniqueId(), TunnelGatewayMessage.class.getName(), msg.getUniqueId());
- }
-
public String getName() { return "Handle Database Lookup Message"; }
public void dropped() {
diff --git a/router/java/src/net/i2p/router/networkdb/HandleDatabaseStoreMessageJob.java b/router/java/src/net/i2p/router/networkdb/HandleDatabaseStoreMessageJob.java
index 080df0242..2e4073fc4 100644
--- a/router/java/src/net/i2p/router/networkdb/HandleDatabaseStoreMessageJob.java
+++ b/router/java/src/net/i2p/router/networkdb/HandleDatabaseStoreMessageJob.java
@@ -10,6 +10,7 @@ package net.i2p.router.networkdb;
import java.util.Date;
import net.i2p.data.Hash;
+import net.i2p.data.LeaseSet;
import net.i2p.data.RouterIdentity;
import net.i2p.data.i2np.DatabaseStoreMessage;
import net.i2p.data.i2np.DeliveryStatusMessage;
@@ -48,8 +49,18 @@ public class HandleDatabaseStoreMessageJob extends JobImpl {
boolean wasNew = false;
if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) {
try {
- Object match = getContext().netDb().store(_message.getKey(), _message.getLeaseSet());
- wasNew = (null == match);
+ LeaseSet ls = _message.getLeaseSet();
+ // mark it as something we received, so we'll answer queries
+ // for it. this flag does NOT get set on entries that we
+ // receive in response to our own lookups.
+ ls.setReceivedAsPublished(true);
+ LeaseSet match = getContext().netDb().store(_message.getKey(), ls);
+ if (match == null) {
+ wasNew = true;
+ } else {
+ wasNew = false;
+ match.setReceivedAsPublished(true);
+ }
} catch (IllegalArgumentException iae) {
invalidMessage = iae.getMessage();
}
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java
index 0f71b172a..a8359e09f 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java
@@ -36,8 +36,8 @@ class StoreJob extends JobImpl {
private long _expiration;
private PeerSelector _peerSelector;
- private final static int PARALLELIZATION = 1; // how many sent at a time
- private final static int REDUNDANCY = 2; // we want the data sent to 2 peers
+ private final static int PARALLELIZATION = 2; // how many sent at a time
+ private final static int REDUNDANCY = 6; // we want the data sent to 6 peers
/**
* additionally send to 1 outlier(s), in case all of the routers chosen in our
* REDUNDANCY set are attacking us by accepting DbStore messages but dropping
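Why REDUNDANCY moves from 2 to 6: per the changelog, stores now have to survive tunnel failures. As a back-of-the-envelope illustration (the 30% per-copy loss rate below is an assumption, not a measurement from this changeset), independent losses make a total store failure fall from roughly 9% with 2 copies to well under 0.1% with 6:

    // Illustrative arithmetic only; pFail is an assumed per-copy loss probability.
    public class ReplicationOdds {
        public static void main(String args[]) {
            double pFail = 0.30;
            System.out.println("all 2 copies lost: " + Math.pow(pFail, 2)); // 0.09
            System.out.println("all 6 copies lost: " + Math.pow(pFail, 6)); // ~0.000729
        }
    }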
diff --git a/router/java/src/net/i2p/router/transport/tcp/TCPTransport.java b/router/java/src/net/i2p/router/transport/tcp/TCPTransport.java
index f32491960..70130f32c 100644
--- a/router/java/src/net/i2p/router/transport/tcp/TCPTransport.java
+++ b/router/java/src/net/i2p/router/transport/tcp/TCPTransport.java
@@ -87,8 +87,7 @@ public class TCPTransport extends TransportImpl {
public static final int DEFAULT_ESTABLISHERS = 3;
/** Ordered list of supported I2NP protocols */
- public static final int[] SUPPORTED_PROTOCOLS = new int[] { 3
- , 4}; // forward compat, to drop <= 0.5.0.3
+ public static final int[] SUPPORTED_PROTOCOLS = new int[] { 4 }; // drop <= 0.5.0.3
/** blah, people shouldnt use defaults... */
public static final int DEFAULT_LISTEN_PORT = 8887;
diff --git a/router/java/src/net/i2p/router/tunnel/BatchedPreprocessor.java b/router/java/src/net/i2p/router/tunnel/BatchedPreprocessor.java
index 4231197c5..b213f5738 100644
--- a/router/java/src/net/i2p/router/tunnel/BatchedPreprocessor.java
+++ b/router/java/src/net/i2p/router/tunnel/BatchedPreprocessor.java
@@ -34,16 +34,18 @@ public class BatchedPreprocessor extends TrivialPreprocessor {
- 1 // 0x00 ending the padding
- 4; // 4 byte checksum
+ private static final boolean DISABLE_BATCHING = false;
+
/* not final or private so the test code can adjust */
static long DEFAULT_DELAY = 500;
/** wait up to 2 seconds before sending a small message */
protected long getSendDelay() { return DEFAULT_DELAY; }
public boolean preprocessQueue(List pending, TunnelGateway.Sender sender, TunnelGateway.Receiver rec) {
- if (_log.shouldLog(Log.INFO))
- _log.info("Preprocess queue with " + pending.size() + " to send");
+ if (_log.shouldLog(Log.DEBUG))
+ _log.debug("Preprocess queue with " + pending.size() + " to send");
- if (getSendDelay() <= 0) {
+ if (DISABLE_BATCHING || getSendDelay() <= 0) {
if (_log.shouldLog(Log.INFO))
_log.info("No batching, send all messages immediately");
while (pending.size() > 0) {
@@ -131,8 +133,8 @@ public class BatchedPreprocessor extends TrivialPreprocessor {
}
}
- if (_log.shouldLog(Log.INFO))
- _log.info("Sent everything on the list (pending=" + pending.size() + ")");
+ if (_log.shouldLog(Log.DEBUG))
+ _log.debug("Sent everything on the list (pending=" + pending.size() + ")");
// sent everything from the pending list, no need to delayed flush
return false;
@@ -150,9 +152,6 @@ public class BatchedPreprocessor extends TrivialPreprocessor {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Sending " + startAt + ":" + sendThrough + " out of " + pending.size());
byte preprocessed[] = _dataCache.acquire().getData();
- ByteArray ivBuf = _ivCache.acquire();
- byte iv[] = ivBuf.getData(); // new byte[IV_SIZE];
- _context.random().nextBytes(iv);
int offset = 0;
offset = writeFragments(pending, startAt, sendThrough, preprocessed, offset);
@@ -160,6 +159,17 @@ public class BatchedPreprocessor extends TrivialPreprocessor {
// so we need to format, pad, and rearrange according to the spec to
// generate the final preprocessed data
+ if (offset <= 0) {
+ StringBuffer buf = new StringBuffer(128);
+ buf.append("wtf, written offset is ").append(offset);
+ buf.append(" for ").append(startAt).append(" through ").append(sendThrough);
+ for (int i = startAt; i <= sendThrough; i++) {
+ buf.append(" ").append(pending.get(i).toString());
+ }
+ _log.log(Log.CRIT, buf.toString());
+ return;
+ }
+
preprocess(preprocessed, offset);
sender.sendPreprocessed(preprocessed, rec);
diff --git a/router/java/src/net/i2p/router/tunnel/BatchedRouterPreprocessor.java b/router/java/src/net/i2p/router/tunnel/BatchedRouterPreprocessor.java
index b8f179516..7c8552e9d 100644
--- a/router/java/src/net/i2p/router/tunnel/BatchedRouterPreprocessor.java
+++ b/router/java/src/net/i2p/router/tunnel/BatchedRouterPreprocessor.java
@@ -17,7 +17,7 @@ public class BatchedRouterPreprocessor extends BatchedPreprocessor {
*/
public static final String PROP_BATCH_FREQUENCY = "batchFrequency";
public static final String PROP_ROUTER_BATCH_FREQUENCY = "router.batchFrequency";
- public static final int DEFAULT_BATCH_FREQUENCY = 0;
+ public static final int DEFAULT_BATCH_FREQUENCY = 500;
public BatchedRouterPreprocessor(RouterContext ctx) {
this(ctx, null);
diff --git a/router/java/src/net/i2p/router/tunnel/FragmentHandler.java b/router/java/src/net/i2p/router/tunnel/FragmentHandler.java
index b6c71fe97..ae95d0330 100644
--- a/router/java/src/net/i2p/router/tunnel/FragmentHandler.java
+++ b/router/java/src/net/i2p/router/tunnel/FragmentHandler.java
@@ -49,6 +49,8 @@ public class FragmentHandler {
"Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000 });
_context.statManager().createRateStat("tunnel.fragmentedDropped", "How many fragments were in a partially received yet failed message?",
"Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000 });
+ _context.statManager().createRateStat("tunnel.corruptMessage", "How many corrupted messages arrived?",
+ "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000 });
}
/**
@@ -61,9 +63,11 @@ public class FragmentHandler {
public void receiveTunnelMessage(byte preprocessed[], int offset, int length) {
boolean ok = verifyPreprocessed(preprocessed, offset, length);
if (!ok) {
- _log.error("Unable to verify preprocessed data (pre.length=" + preprocessed.length
- + " off=" +offset + " len=" + length, new Exception("failed"));
+ if (_log.shouldLog(Log.WARN))
+ _log.warn("Unable to verify preprocessed data (pre.length="
+ + preprocessed.length + " off=" +offset + " len=" + length);
_cache.release(new ByteArray(preprocessed));
+ _context.statManager().addRateData("tunnel.corruptMessage", 1, 1);
return;
}
offset += HopProcessor.IV_LENGTH; // skip the IV
@@ -84,6 +88,7 @@ public class FragmentHandler {
} catch (RuntimeException e) {
if (_log.shouldLog(Log.ERROR))
_log.error("Corrupt fragment received: offset = " + offset, e);
+ _context.statManager().addRateData("tunnel.corruptMessage", 1, 1);
throw e;
} finally {
// each of the FragmentedMessages populated make a copy out of the
@@ -110,8 +115,19 @@ public class FragmentHandler {
private boolean verifyPreprocessed(byte preprocessed[], int offset, int length) {
// now we need to verify that the message was received correctly
int paddingEnd = HopProcessor.IV_LENGTH + 4;
- while (preprocessed[offset+paddingEnd] != (byte)0x00)
+ while (preprocessed[offset+paddingEnd] != (byte)0x00) {
paddingEnd++;
+ if (offset+paddingEnd >= length) {
+ if (_log.shouldLog(Log.ERROR))
+ _log.error("Corrupt tunnel message padding");
+ if (_log.shouldLog(Log.WARN))
+ _log.warn("cannot verify, going past the end [off="
+ + offset + " len=" + length + " paddingEnd="
+ + paddingEnd + " data:\n"
+ + Base64.encode(preprocessed, offset, length));
+ return false;
+ }
+ }
paddingEnd++; // skip the last
ByteArray ba = _validateCache.acquire(); // larger than necessary, but always sufficient
@@ -129,10 +145,11 @@ public class FragmentHandler {
boolean eq = DataHelper.eq(v.getData(), 0, preprocessed, offset + HopProcessor.IV_LENGTH, 4);
if (!eq) {
if (_log.shouldLog(Log.ERROR))
- _log.error("Endpoint data doesn't match: # pad bytes: " + (paddingEnd-(HopProcessor.IV_LENGTH+4)-1));
- if (_log.shouldLog(Log.DEBUG))
- _log.debug("nomatching endpoint: # pad bytes: " + (paddingEnd-(HopProcessor.IV_LENGTH+4)-1) + "\n"
- + Base64.encode(preprocessed, offset + paddingEnd, preV.length-HopProcessor.IV_LENGTH));
+ _log.error("Corrupt tunnel message - verification fails");
+ if (_log.shouldLog(Log.WARN))
+ _log.warn("nomatching endpoint: # pad bytes: " + (paddingEnd-(HopProcessor.IV_LENGTH+4)-1) + "\n"
+ + " offset=" + offset + " length=" + length + " paddingEnd=" + paddingEnd
+ + Base64.encode(preprocessed, offset, length));
}
_context.sha().cache().release(cache);
@@ -380,8 +397,8 @@ public class FragmentHandler {
if (removed && !_msg.getReleased()) {
_failed++;
noteFailure(_msg.getMessageId());
- if (_log.shouldLog(Log.ERROR))
- _log.error("Dropped failed fragmented message: " + _msg);
+ if (_log.shouldLog(Log.WARN))
+ _log.warn("Dropped failed fragmented message: " + _msg);
_context.statManager().addRateData("tunnel.fragmentedDropped", _msg.getFragmentCount(), _msg.getLifetime());
_msg.failed();
} else {
diff --git a/router/java/src/net/i2p/router/tunnel/TunnelGateway.java b/router/java/src/net/i2p/router/tunnel/TunnelGateway.java
index 7ec95790b..bb3d5b696 100644
--- a/router/java/src/net/i2p/router/tunnel/TunnelGateway.java
+++ b/router/java/src/net/i2p/router/tunnel/TunnelGateway.java
@@ -8,6 +8,7 @@ import net.i2p.data.Hash;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.TunnelGatewayMessage;
+import net.i2p.router.Router;
import net.i2p.util.Log;
import net.i2p.util.SimpleTimer;
@@ -87,7 +88,7 @@ public class TunnelGateway {
_messagesSent++;
boolean delayedFlush = false;
- Pending cur = new Pending(msg, toRouter, toTunnel);
+ Pending cur = new PendingImpl(msg, toRouter, toTunnel);
synchronized (_queue) {
_queue.add(cur);
delayedFlush = _preprocessor.preprocessQueue(_queue, _sender, _receiver);
@@ -96,9 +97,9 @@ public class TunnelGateway {
// expire any as necessary, even if it's fragmented
for (int i = 0; i < _queue.size(); i++) {
Pending m = (Pending)_queue.get(i);
- if (m.getExpiration() < _lastFlush) {
+ if (m.getExpiration() + Router.CLOCK_FUDGE_FACTOR < _lastFlush) {
if (_log.shouldLog(Log.ERROR))
- _log.error("Expire on the queue: " + m);
+ _log.error("Expire on the queue (size=" + _queue.size() + "): " + m);
_queue.remove(i);
i--;
}
@@ -140,13 +141,13 @@ public class TunnelGateway {
}
public static class Pending {
- private Hash _toRouter;
- private TunnelId _toTunnel;
+ protected Hash _toRouter;
+ protected TunnelId _toTunnel;
private long _messageId;
- private long _expiration;
- private byte _remaining[];
- private int _offset;
- private int _fragmentNumber;
+ protected long _expiration;
+ protected byte _remaining[];
+ protected int _offset;
+ protected int _fragmentNumber;
public Pending(I2NPMessage message, Hash toRouter, TunnelId toTunnel) {
_toRouter = toRouter;
@@ -174,6 +175,35 @@ public class TunnelGateway {
/** ok, fragment sent, increment what the next will be */
public void incrementFragmentNumber() { _fragmentNumber++; }
}
+ private class PendingImpl extends Pending {
+ private long _created;
+
+ public PendingImpl(I2NPMessage message, Hash toRouter, TunnelId toTunnel) {
+ super(message, toRouter, toTunnel);
+ _created = _context.clock().now();
+ }
+
+ public String toString() {
+ StringBuffer buf = new StringBuffer(64);
+ buf.append("Message on ");
+ buf.append(TunnelGateway.this.toString());
+ if (_toRouter != null) {
+ buf.append(" targetting ");
+ buf.append(_toRouter.toBase64()).append(" ");
+ if (_toTunnel != null)
+ buf.append(_toTunnel.getTunnelId());
+ }
+ long now = _context.clock().now();
+ buf.append(" actual lifetime ");
+ buf.append(now - _created).append("ms");
+ buf.append(" potential lifetime ");
+ buf.append(_expiration - _created).append("ms");
+ buf.append(" size ").append(_remaining.length);
+ buf.append(" offset ").append(_offset);
+ buf.append(" frag ").append(_fragmentNumber);
+ return buf.toString();
+ }
+ }
private class DelayedFlush implements SimpleTimer.TimedEvent {
public void timeReached() {
diff --git a/router/java/src/net/i2p/router/tunnel/pool/RequestTunnelJob.java b/router/java/src/net/i2p/router/tunnel/pool/RequestTunnelJob.java
index 5275dedd3..ab6dd977e 100644
--- a/router/java/src/net/i2p/router/tunnel/pool/RequestTunnelJob.java
+++ b/router/java/src/net/i2p/router/tunnel/pool/RequestTunnelJob.java
@@ -45,7 +45,7 @@ public class RequestTunnelJob extends JobImpl {
private boolean _isFake;
private boolean _isExploratory;
- static final int HOP_REQUEST_TIMEOUT = 30*1000;
+ static final int HOP_REQUEST_TIMEOUT = 20*1000;
private static final int LOOKUP_TIMEOUT = 10*1000;
public RequestTunnelJob(RouterContext ctx, TunnelCreatorConfig cfg, Job onCreated, Job onFailed, int hop, boolean isFake, boolean isExploratory) {
diff --git a/router/java/src/net/i2p/router/tunnel/pool/TunnelPool.java b/router/java/src/net/i2p/router/tunnel/pool/TunnelPool.java
index afa92ac6e..8a3b53d26 100644
--- a/router/java/src/net/i2p/router/tunnel/pool/TunnelPool.java
+++ b/router/java/src/net/i2p/router/tunnel/pool/TunnelPool.java
@@ -37,11 +37,11 @@ public class TunnelPool {
private long _lastSelectionPeriod;
/**
- * Only 3 builds per minute per pool, even if we have failing tunnels,
+ * Only 5 builds per minute per pool, even if we have failing tunnels,
* etc. On overflow, the necessary additional tunnels are built by the
* RefreshJob
*/
- private static final int MAX_BUILDS_PER_MINUTE = 3;
+ private static final int MAX_BUILDS_PER_MINUTE = 5;
public TunnelPool(RouterContext ctx, TunnelPoolManager mgr, TunnelPoolSettings settings, TunnelPeerSelector sel, TunnelBuilder builder) {
_context = ctx;