Compare commits

...

21 Commits

Author SHA1 Message Date
jrandom
a95a968fa8 * 2004-10-18 0.4.1.3 released
2004-10-18  jrandom
    * Allow sending messages with a section of a byte array.
    * Reduced stats published.
2004-10-18 19:07:59 +00:00
jrandom
6c08941d8b updated curiosity.i2p 2004-10-18 18:23:13 +00:00
jrandom
e13a5b3865 added blueheron.i2p 2004-10-17 21:39:06 +00:00
jrandom
9011d5604a 2004-10-17 jrandom
* Don't b0rk on whitespace in the router address.
2004-10-17 21:03:05 +00:00
jrandom
78aa4ca137 updated files.i2p 2004-10-17 08:45:16 +00:00
jrandom
93111842df clarify history - we reduce the capacity calc, not the 'isFailing'.
* More aggressively reduce the capacity of peers if their tunnels are
      failing so that we move off them quicker.
2004-10-17 04:00:21 +00:00
jrandom
88693f8adc 2004-10-16 jrandom
* More aggressively fail peers if their tunnels are failing so that we
      move off them quicker.
    * Simplify some data structure serialization for reuse in the streaming
      lib, as well as add support for signing and verifying partial byte
      arrays.
    * Logging updates
2004-10-17 03:58:08 +00:00
jrandom
f904b012e9 initial impl for the new streaming lib (saying this isn't done should be obvious, but the
packet spec is at a save point)
2004-10-17 03:47:03 +00:00
jrandom
cebe0a151f added utansans.i2p 2004-10-16 21:45:55 +00:00
jrandom
8fffad0891 2004-10-16 jrandom
* Increased the default minimum tunnel test time to 5 seconds, since we
      still see the occational message processing time spike to 2 seconds.
    * Update the SimpleTimer to allow rescheduling a task thats already
      queued (useful for the new streaming lib).
2004-10-16 18:00:47 +00:00
jrandom
fb1263dad7 2004-10-15 jrandom
* Replaced old minimum tunnel test timeout of 1s with a configurable
      value (router.config property "router.tunnelTestMinimum", with the
      default of 2s).
2004-10-15 17:39:18 +00:00
jrandom
28c5d6c10d 2004-10-14 jrandom
* Tunnel rejection is no longer a sign of an overwhelmingly loaded
      peer, so don't use it as a key point of the IsFailing calculator.
      We still use it as a key point of the Capacity calculator, however.
2004-10-15 01:20:12 +00:00
jrandom
e7a6f6836e added irc.orz.i2p 2004-10-14 22:35:11 +00:00
jrandom
f8ffe016d1 2004-10-14 jrandom
* Allow for a configurable tunnel "growth factor", rather than trying
      to achieve a steady state.  This will let us grow gradually when
      the router is needed more, rather than blindly accepting the request
      or arbitrarily choking it at an averaged value.  Configure this with
      "router.tunnelGrowthFactor" in the router.config (default "1.5").
    * Adjust the tunnel test timeouts dynamically - rather than the old
      flat 30s (!!!) timeout, we set the timeout to 2x the average tunnel
      test time (the deviation factor can be adjusted by setting
      "router.tunnelTestDeviation" to "3.0" or whatever).  This should help
      find the 'good' tunnels.
    * Added some crazy debugging to try and track down an intermittent hang.
2004-10-14 20:02:45 +00:00
jrandom
ec322f0966 added nano.i2p 2004-10-13 20:42:35 +00:00
jrandom
0674709fc6 added ragnarok.i2p 2004-10-13 20:33:19 +00:00
jrandom
d91ac7ef21 2004-10-13 jrandom
* Fix the probabalistic tunnel reject (we always accepted everything,
      since the docs on java.util.Random.nextDouble() are wrong..)
    * Fixed a race on startup (thanks Quadn!)
2004-10-13 19:40:47 +00:00
jrandom
2f0c3c7baf added marcos.i2p 2004-10-13 16:29:27 +00:00
jrandom
be68407707 duh (oops) 2004-10-12 21:50:17 +00:00
jrandom
f799a25aeb 2004-10-12 jrandom
* Disable the probabalistic drop by default (enable via the router config
      property "tcp.dropProbabalistically=true")
    * Disable the actual watchdog shutdown by default, but keep track of more
      variables and log a lot more when it occurs (enable via the router
      config property "watchdog.haltOnHang=true")
    * Implement some tunnel participation smoothing by refusing requests
      probabalistically as our participating tunnel count exceeds the previous
      hour's, or when the 10 minute average tunnel test time exceeds the 60
      minute average tunnel test time.  The probabilities in both cases are
      oldAverage / #current, so if you're suddenly flooded with 200 tunnels
      and you had previously only participated in 50, you'll have a 25% chance
      of accepting a subsequent request.
2004-10-12 21:29:41 +00:00
duck
8329d045f1 confirm removal 2004-10-11 00:23:26 +00:00
31 changed files with 1365 additions and 116 deletions

View File

@@ -63,7 +63,7 @@ class WebEditPageFormGenerator {
addOptions(buf, controller);
buf.append("<input type=\"submit\" name=\"action\" value=\"Save\">\n");
buf.append("<input type=\"submit\" name=\"action\" value=\"Remove\">\n");
buf.append(" <i>confirm</i> <input type=\"checkbox\" name=\"removeConfirm\" value=\"true\" />\n");
buf.append(" <i>confirm removal:</i> <input type=\"checkbox\" name=\"removeConfirm\" value=\"true\" />\n");
buf.append("</form>\n");
return buf.toString();
}
@@ -83,7 +83,7 @@ class WebEditPageFormGenerator {
addOptions(buf, controller);
buf.append("<input type=\"submit\" name=\"action\" value=\"Save\"><br />\n");
buf.append("<input type=\"submit\" name=\"action\" value=\"Remove\">\n");
buf.append(" <i>confirm</i> <input type=\"checkbox\" name=\"removeConfirm\" value=\"true\" />\n");
buf.append(" <i>confirm removal:</i> <input type=\"checkbox\" name=\"removeConfirm\" value=\"true\" />\n");
buf.append("</form>\n");
return buf.toString();
}
@@ -118,7 +118,7 @@ class WebEditPageFormGenerator {
addOptions(buf, controller);
buf.append("<input type=\"submit\" name=\"action\" value=\"Save\">\n");
buf.append("<input type=\"submit\" name=\"action\" value=\"Remove\">\n");
buf.append(" <i>confirm</i> <input type=\"checkbox\" name=\"removeConfirm\" value=\"true\" />\n");
buf.append(" <i>confirm removal:</i> <input type=\"checkbox\" name=\"removeConfirm\" value=\"true\" />\n");
buf.append("</form>\n");
return buf.toString();
}

View File

@@ -0,0 +1,216 @@
package net.i2p.client.streaming;
import java.io.IOException;
import java.io.InputStream;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import net.i2p.data.ByteArray;
/**
* Stream that can be given messages out of order
* yet present them in order.
*
*/
public class MessageInputStream extends InputStream {
/**
* List of ByteArray objects of data ready to be read,
* with the first ByteArray at index 0, and the next
* actual byte to be read at _readyDataBlockIndex of
* that array.
*
*/
private List _readyDataBlocks;
private int _readyDataBlockIndex;
/** highest message ID used in the readyDataBlocks */
private long _highestReadyBlockId;
/**
* Message ID (Long) to ByteArray for blocks received
* out of order when there are lower IDs not yet
* received
*/
private Map _notYetReadyBlocks;
/**
* if we have received a flag saying there won't be later messages, EOF
* after we have cleared what we have received.
*/
private boolean _closeReceived;
/** if we don't want any more data, ignore the data */
private boolean _locallyClosed;
private int _readTimeout;
private Object _dataLock;
public MessageInputStream() {
_readyDataBlocks = new ArrayList(4);
_readyDataBlockIndex = 0;
_highestReadyBlockId = -1;
_readTimeout = -1;
_notYetReadyBlocks = new HashMap(4);
_dataLock = new Object();
_closeReceived = false;
_locallyClosed = false;
}
/** What is the highest block ID we've completely received through? */
public long getHighestReadyBockId() {
synchronized (_dataLock) {
return _highestReadyBlockId;
}
}
/**
* Ascending list of block IDs greater than the highest
* ready block ID, or null if there aren't any.
*
*/
public long[] getOutOfOrderBlocks() {
long blocks[] = null;
synchronized (_dataLock) {
int num = _notYetReadyBlocks.size();
if (num <= 0) return null;
blocks = new long[num];
int i = 0;
for (Iterator iter = _notYetReadyBlocks.keySet().iterator(); iter.hasNext(); ) {
Long id = (Long)iter.next();
blocks[i] = id.longValue();
i++;
}
}
Arrays.sort(blocks);
return blocks;
}
/** how many blocks have we received that we still have holes before? */
public int getOutOfOrderBlockCount() {
synchronized (_dataLock) {
return _notYetReadyBlocks.size();
}
}
/**
* how long a read() call should block (if less than 0, block indefinitely,
* but if it is 0, do not block at all)
*/
public int getReadTimeout() { return _readTimeout; }
public void setReadTimeout(int timeout) { _readTimeout = timeout; }
/**
* A new message has arrived - toss it on the appropriate queue (moving
* previously pending messages to the ready queue if it fills the gap, etc)
*
*/
public void messageReceived(long messageId, byte payload[]) {
synchronized (_dataLock) {
if (messageId <= _highestReadyBlockId) return; // already received
if (_highestReadyBlockId + 1 == messageId) {
if (!_locallyClosed)
_readyDataBlocks.add(new ByteArray(payload));
_highestReadyBlockId = messageId;
// now pull in any previously pending blocks
while (_notYetReadyBlocks.containsKey(new Long(_highestReadyBlockId + 1))) {
_readyDataBlocks.add(_notYetReadyBlocks.get(new Long(_highestReadyBlockId + 1)));
_highestReadyBlockId++;
}
_dataLock.notifyAll();
} else {
if (_locallyClosed) // dont need the payload, just the msgId in order
_notYetReadyBlocks.put(new Long(messageId), new ByteArray(null));
else
_notYetReadyBlocks.put(new Long(messageId), new ByteArray(payload));
}
}
}
public int read() throws IOException {
if (_locallyClosed) throw new IOException("Already locally closed");
synchronized (_dataLock) {
if (_readyDataBlocks.size() <= 0) {
if ( (_notYetReadyBlocks.size() <= 0) && (_closeReceived) ) {
return -1;
} else {
if (_readTimeout < 0) {
try { _dataLock.wait(); } catch (InterruptedException ie) { }
} else if (_readTimeout > 0) {
try { _dataLock.wait(_readTimeout); } catch (InterruptedException ie) { }
} else { // readTimeout == 0
// noop, don't block
}
if (_readyDataBlocks.size() <= 0) {
throw new InterruptedIOException("Timeout reading");
}
}
}
// either was already ready, or we wait()ed and it arrived
ByteArray cur = (ByteArray)_readyDataBlocks.get(0);
byte rv = cur.getData()[_readyDataBlockIndex++];
if (cur.getData().length <= _readyDataBlockIndex) {
_readyDataBlockIndex = 0;
_readyDataBlocks.remove(0);
}
return (rv < 0 ? rv + 256 : rv);
}
}
public int available() throws IOException {
if (_locallyClosed) throw new IOException("Already closed, you wanker");
synchronized (_dataLock) {
if (_readyDataBlocks.size() <= 0)
return 0;
int numBytes = 0;
for (int i = 0; i < _readyDataBlocks.size(); i++) {
ByteArray cur = (ByteArray)_readyDataBlocks.get(i);
if (i == 0)
numBytes += cur.getData().length - _readyDataBlockIndex;
else
numBytes += cur.getData().length;
}
return numBytes;
}
}
/**
* How many bytes are queued up for reading (or sitting in the out-of-order
* buffer)?
*
*/
public int getTotalQueuedSize() {
synchronized (_dataLock) {
if (_locallyClosed) return 0;
int numBytes = 0;
for (int i = 0; i < _readyDataBlocks.size(); i++) {
ByteArray cur = (ByteArray)_readyDataBlocks.get(i);
if (i == 0)
numBytes += cur.getData().length - _readyDataBlockIndex;
else
numBytes += cur.getData().length;
}
for (Iterator iter = _notYetReadyBlocks.values().iterator(); iter.hasNext(); ) {
ByteArray cur = (ByteArray)iter.next();
numBytes += cur.getData().length;
}
return numBytes;
}
}
public void close() {
synchronized (_dataLock) {
_readyDataBlocks.clear();
// we don't need the data, but we do need to keep track of the messageIds
// received, so we can ACK accordingly
for (Iterator iter = _notYetReadyBlocks.values().iterator(); iter.hasNext(); ) {
ByteArray ba = (ByteArray)iter.next();
ba.setData(null);
}
_locallyClosed = true;
}
}
}
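
For illustration only (not part of the diff above): a minimal sketch of how MessageInputStream behaves when blocks arrive out of order - block 1 is held aside until block 0 fills the gap, after which both become readable in order. Only the constructor and methods shown above are used.

import java.io.IOException;

import net.i2p.client.streaming.MessageInputStream;

/** Sketch: out-of-order delivery, in-order reads. */
public class InOrderReadSketch {
    public static void main(String args[]) throws IOException {
        MessageInputStream in = new MessageInputStream();
        in.setReadTimeout(0);                                 // never block in read()
        in.messageReceived(1, "world".getBytes());            // arrives early, held aside
        System.out.println("available: " + in.available());   // 0 - block 0 still missing
        in.messageReceived(0, "hello ".getBytes());           // fills the gap, both become ready
        System.out.println("available: " + in.available());   // 11
        StringBuffer buf = new StringBuffer();
        while (in.available() > 0)
            buf.append((char)in.read());
        System.out.println(buf);                              // "hello world"
    }
}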

View File

@@ -0,0 +1,95 @@
package net.i2p.client.streaming;
import java.io.IOException;
import java.io.OutputStream;
/**
*
*/
public class MessageOutputStream extends OutputStream {
private byte _buf[];
private int _valid;
private Object _dataLock;
private DataReceiver _dataReceiver;
private IOException _streamError;
public MessageOutputStream(DataReceiver receiver) {
this(receiver, 64*1024);
}
public MessageOutputStream(DataReceiver receiver, int bufSize) {
super();
_buf = new byte[bufSize];
_dataReceiver = receiver;
_dataLock = new Object();
}
public void write(byte b[]) throws IOException {
write(b, 0, b.length);
}
public void write(byte b[], int off, int len) throws IOException {
synchronized (_dataLock) {
int remaining = len;
while (remaining > 0) {
if (_valid + remaining < _buf.length) {
// simply buffer the data, no flush
System.arraycopy(b, off, _buf, _valid, remaining);
_valid += remaining; // track how much of the buffer is now in use
remaining = 0;
} else {
// buffer whatever we can fit then flush,
// repeating until we've pushed all of the
// data through
int toWrite = _buf.length - _valid;
System.arraycopy(b, off, _buf, _valid, toWrite);
off += toWrite; // advance past the chunk just buffered
remaining -= toWrite;
_valid = _buf.length;
_dataReceiver.writeData(_buf, 0, _valid);
_valid = 0;
throwAnyError();
}
}
}
throwAnyError();
}
public void write(int b) throws IOException {
write(new byte[] { (byte)b }, 0, 1);
throwAnyError();
}
public void flush() throws IOException {
synchronized (_dataLock) {
_dataReceiver.writeData(_buf, 0, _valid);
_valid = 0;
}
throwAnyError();
}
private void throwAnyError() throws IOException {
if (_streamError != null) {
IOException ioe = _streamError;
_streamError = null;
throw ioe;
}
}
void streamErrorOccurred(IOException ioe) {
_streamError = ioe;
}
/**
* called whenever the engine wants to push more data to the
* peer
*
*/
void flushAvailable(DataReceiver target) {
synchronized (_dataLock) {
target.writeData(_buf, 0, _valid);
_valid = 0;
}
}
public interface DataReceiver {
public void writeData(byte buf[], int off, int size);
}
}
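
For illustration only: a minimal DataReceiver against the class above. The receiver here just counts the bytes it is handed; a real receiver would packetize them onto the wire. The 16-byte buffer size is an arbitrary choice to force the flush path.

import java.io.IOException;

import net.i2p.client.streaming.MessageOutputStream;

/** Sketch: watch the stream hand full buffers to its DataReceiver. */
public class CountingReceiverSketch {
    public static void main(String args[]) throws IOException {
        final int counted[] = new int[1];
        MessageOutputStream.DataReceiver receiver = new MessageOutputStream.DataReceiver() {
            public void writeData(byte buf[], int off, int size) {
                counted[0] += size;    // a real receiver would build and send a Packet here
            }
        };
        MessageOutputStream out = new MessageOutputStream(receiver, 16);
        out.write(new byte[40]);       // larger than the buffer, so writeData() fires twice
        out.flush();                   // pushes the remaining 8 buffered bytes
        System.out.println("bytes pushed: " + counted[0]);    // 40
    }
}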

View File

@@ -0,0 +1,389 @@
package net.i2p.client.streaming;
import java.util.Arrays;
import net.i2p.I2PAppContext;
import net.i2p.data.DataHelper;
import net.i2p.data.Destination;
import net.i2p.data.Signature;
import net.i2p.data.SigningPrivateKey;
/**
* Contain a single packet transferred as part of a streaming connection.
* The data format is as follows:<ul>
* <li>{@see #getSendStreamId sendStreamId} [4 byte value]</li>
* <li>{@see #getReceiveStreamId receiveStreamId} [4 byte value]</li>
* <li>{@see #getSequenceNum sequenceNum} [4 byte unsigned integer]</li>
* <li>{@see #getAckThrough ackThrough} [4 byte unsigned integer]</li>
* <li>number of NACKs [1 byte unsigned integer]</li>
* <li>that many {@see #getNacks NACKs}</li>
* <li>{@see #getResendDelay resendDelay} [1 byte integer]</li>
* <li>flags [2 byte value]</li>
* <li>option data size [2 byte integer]</li>
* <li>option data specified by those flags [0 or more bytes]</li>
* <li>payload [remaining packet size]</li>
* </ul>
*
* <p>The flags field above specifies some metadata about the packet, and in
* turn may require certain additional data to be included. The flags are
* as follows (with any data structures specified added to the options area
* in the given order):</p><ol>
* <li>{@see #FLAG_SYNCHRONIZE}: no option data</li>
* <li>{@see #FLAG_CLOSE}: no option data</li>
* <li>{@see #FLAG_RESET}: no option data</li>
* <li>{@see #FLAG_SIGNATURE_INCLUDED}: {@see net.i2p.data.Signature}</li>
* <li>{@see #FLAG_SIGNATURE_REQUESTED}: no option data</li>
* <li>{@see #FLAG_FROM_INCLUDED}: {@see net.i2p.data.Destination}</li>
* <li>{@see #FLAG_DELAY_REQUESTED}: 1 byte integer</li>
* <li>{@see #FLAG_MAX_PACKET_SIZE_INCLUDED}: 2 byte integer</li>
* <li>{@see #FLAG_PROFILE_INTERACTIVE}: no option data</li>
* </ol>
*
* <p>If the signature is included, it uses the Destination's DSA key
* to sign the entire header and payload with the space in the options
* for the signature being set to all zeroes.</p>
*
*/
public class Packet {
private byte _sendStreamId[];
private byte _receiveStreamId[];
private long _sequenceNum;
private long _ackThrough;
private long _nacks[];
private int _resendDelay;
private int _flags;
private byte _payload[];
// the next four are set only if the flags say so
private Signature _optionSignature;
private Destination _optionFrom;
private int _optionDelay;
private int _optionMaxSize;
/**
* The receiveStreamId will be set to this when the packet doesn't know
* what ID will be assigned by the remote peer (aka this is the initial
* synchronize packet)
*
*/
public static final byte RECEIVE_STREAM_ID_UNKNOWN[] = new byte[] { 0x00, 0x00, 0x00, 0x00 };
/**
* This packet is creating a new socket connection (if the receiveStreamId
* is RECEIVE_STREAM_ID_UNKNOWN) or it is acknowledging a request to
* create a connection and in turn is accepting the socket.
*
*/
public static final int FLAG_SYNCHRONIZE = (1 << 0);
/**
* The sender of this packet will not be sending any more payload data.
*/
public static final int FLAG_CLOSE = (1 << 1);
/**
* This packet is being sent to signify that the socket does not exist
* (or, if in response to an initial synchronize packet, that the
* connection was refused).
*
*/
public static final int FLAG_RESET = (1 << 2);
/**
* This packet contains a DSA signature from the packet's sender. This
* signature is within the packet options. All synchronize packets must
* have this flag set.
*
*/
public static final int FLAG_SIGNATURE_INCLUDED = (1 << 3);
/**
* This packet wants the recipient to include signatures on subsequent
* packets sent to the creator of this packet.
*/
public static final int FLAG_SIGNATURE_REQUESTED = (1 << 4);
/**
* This packet includes the full I2P destination of the packet's sender.
* The initial synchronize packet must have this flag set.
*/
public static final int FLAG_FROM_INCLUDED = (1 << 5);
/**
* This packet includes an explicit request for the recipient to delay
* sending any packets with data for a given amount of time.
*
*/
public static final int FLAG_DELAY_REQUESTED = (1 << 6);
/**
* This packet includes a request that the recipient not send any
* subsequent packets with payloads greater than a specific size.
* If not set and no prior value was delivered, the maximum value
* will be assumed (approximately 32KB).
*
*/
public static final int FLAG_MAX_PACKET_SIZE_INCLUDED = (1 << 7);
/**
* If set, this packet is travelling as part of an interactive flow,
* meaning it is more lag sensitive than throughput sensitive. aka
* send data ASAP rather than waiting around to send full packets.
*
*/
public static final int FLAG_PROFILE_INTERACTIVE = (1 << 8);
/** what stream is this packet a part of? */
public byte[] getSendStreamId() { return _sendStreamId; }
public void setSendStreamId(byte[] id) { _sendStreamId = id; }
/**
* what is the stream replies should be sent on? if the
* connection is still being built, this should be
* {@see #RECEIVE_STREAM_ID_UNKNOWN}.
*
*/
public byte[] getReceiveStreamId() { return _receiveStreamId; }
public void setReceiveStreamId(byte[] id) { _receiveStreamId = id; }
/** 0-indexed sequence number for this Packet in the sendStream */
public long getSequenceNum() { return _sequenceNum; }
public void setSequenceNum(long num) { _sequenceNum = num; }
/**
* what is the highest packet sequence number that received
* on the receiveStreamId? This field is ignored on the initial
* connection packet (where receiveStreamId is the unknown id).
*
*/
public long getAckThrough() { return _ackThrough; }
public void setAckThrough(long id) { _ackThrough = id; }
/**
* What packet sequence numbers below the getAckThrough() value
* have not been received? this may be null.
*
*/
public long[] getNacks() { return _nacks; }
public void setNacks(long nacks[]) { _nacks = nacks; }
/**
* How long is the creator of this packet going to wait before
* resending this packet (if it hasn't yet been ACKed). The
* value is seconds since the packet was created.
*
*/
public int getResendDelay() { return _resendDelay; }
public void setResendDelay(int numSeconds) { _resendDelay = numSeconds; }
/** get the actual payload of the message. may be null */
public byte[] getPayload() { return _payload; }
public void setPayload(byte payload[]) { _payload = payload; }
/** is a particular flag set on this packet? */
public boolean isFlagSet(int flag) { return 0 != (_flags & flag); }
public void setFlag(int flag) { _flags |= flag; }
/** the signature on the packet (only included if the flag for it is set) */
public Signature getOptionalSignature() { return _optionSignature; }
public void setOptionalSignature(Signature sig) { _optionSignature = sig; }
/** the sender of the packet (only included if the flag for it is set) */
public Destination getOptionalFrom() { return _optionFrom; }
public void setOptionalFrom(Destination from) { _optionFrom = from; }
/**
* How many milliseconds the sender of this packet wants the recipient
* to wait before sending any more data (only valid if the flag for it is
* set)
*/
public int getOptionalDelay() { return _optionDelay; }
public void setOptionalDelay(int delayMs) { _optionDelay = delayMs; }
/**
* What is the largest payload the sender of this packet wants to receive?
*
*/
public int getOptionalMaxSize() { return _optionMaxSize; }
public void setOptionalMaxSize(int numBytes) { _optionMaxSize = numBytes; }
/**
* Write the packet to the buffer (starting at the offset) and return
* the number of bytes written.
*
* @throws IllegalStateException if there is data missing or otherwise b0rked
*/
public int writePacket(byte buffer[], int offset) throws IllegalStateException {
return writePacket(buffer, offset, true);
}
/**
* @param includeSig if true, include the real signature, otherwise put zeroes
* in its place.
*/
private int writePacket(byte buffer[], int offset, boolean includeSig) throws IllegalStateException {
int cur = offset;
System.arraycopy(_sendStreamId, 0, buffer, cur, _sendStreamId.length);
cur += _sendStreamId.length;
System.arraycopy(_receiveStreamId, 0, buffer, cur, _receiveStreamId.length);
cur += _receiveStreamId.length;
DataHelper.toLong(buffer, cur, 4, _sequenceNum);
cur += 4;
DataHelper.toLong(buffer, cur, 4, _ackThrough);
cur += 4;
if (_nacks != null) {
DataHelper.toLong(buffer, cur, 1, _nacks.length);
cur++;
for (int i = 0; i < _nacks.length; i++) {
DataHelper.toLong(buffer, cur, 4, _nacks[i]);
cur += 4;
}
} else {
DataHelper.toLong(buffer, cur, 1, 0);
cur++;
}
DataHelper.toLong(buffer, cur, 1, _resendDelay);
cur++;
DataHelper.toLong(buffer, cur, 2, _flags);
cur += 2;
int optionSize = 0;
if (isFlagSet(FLAG_DELAY_REQUESTED))
optionSize += 1;
if (isFlagSet(FLAG_FROM_INCLUDED))
optionSize += _optionFrom.size();
if (isFlagSet(FLAG_MAX_PACKET_SIZE_INCLUDED))
optionSize += 2;
if (isFlagSet(FLAG_SIGNATURE_INCLUDED))
optionSize += Signature.SIGNATURE_BYTES;
DataHelper.toLong(buffer, cur, 2, optionSize);
cur += 2;
if (isFlagSet(FLAG_DELAY_REQUESTED)) {
DataHelper.toLong(buffer, cur, 1, _optionDelay);
cur++;
}
if (isFlagSet(FLAG_FROM_INCLUDED)) {
cur += _optionFrom.writeBytes(buffer, cur);
}
if (isFlagSet(FLAG_MAX_PACKET_SIZE_INCLUDED)) {
DataHelper.toLong(buffer, cur, 2, _optionMaxSize);
cur += 2;
}
if (isFlagSet(FLAG_SIGNATURE_INCLUDED)) {
if (includeSig)
System.arraycopy(_optionSignature.getData(), 0, buffer, cur, Signature.SIGNATURE_BYTES);
else // we're signing (or validating)
Arrays.fill(buffer, cur, cur + Signature.SIGNATURE_BYTES, (byte)0x0);
cur += Signature.SIGNATURE_BYTES;
}
if (_payload != null) {
System.arraycopy(_payload, 0, buffer, cur, _payload.length);
cur += _payload.length;
}
return cur - offset;
}
/**
* Read the packet from the buffer (starting at the offset) and return
* the number of bytes read.
*
* @param buffer packet buffer containing the data
* @param offset index into the buffer to start readign
* @param length how many bytes within the buffer past the offset are
* part of the packet?
*
* @throws IllegalArgumentException if the data is b0rked
*/
public void readPacket(byte buffer[], int offset, int length) throws IllegalArgumentException {
int cur = offset;
_sendStreamId = new byte[4];
System.arraycopy(buffer, cur, _sendStreamId, 0, 4);
cur += 4;
_receiveStreamId = new byte[4];
System.arraycopy(buffer, cur, _receiveStreamId, 0, 4);
cur += 4;
_sequenceNum = DataHelper.fromLong(buffer, cur, 4);
cur += 4;
_ackThrough = DataHelper.fromLong(buffer, cur, 4);
cur += 4;
int numNacks = (int)DataHelper.fromLong(buffer, cur, 1);
cur++;
if (numNacks > 0) {
_nacks = new long[numNacks];
for (int i = 0; i < numNacks; i++) {
_nacks[i] = DataHelper.fromLong(buffer, cur, 4);
cur += 4;
}
} else {
_nacks = null;
}
_resendDelay = (int)DataHelper.fromLong(buffer, cur, 1);
cur++;
_flags = (int)DataHelper.fromLong(buffer, cur, 2);
cur += 2;
int optionSize = (int)DataHelper.fromLong(buffer, cur, 2);
cur += 2;
int payloadBegin = cur + optionSize;
// skip ahead to the payload
_payload = new byte[offset + length - payloadBegin];
System.arraycopy(buffer, payloadBegin, _payload, 0, _payload.length);
// ok now lets go back and deal with the options
if (isFlagSet(FLAG_DELAY_REQUESTED)) {
_optionDelay = (int)DataHelper.fromLong(buffer, cur, 1);
cur++;
}
if (isFlagSet(FLAG_FROM_INCLUDED)) {
_optionFrom = new Destination();
cur += _optionFrom.readBytes(buffer, cur);
}
if (isFlagSet(FLAG_MAX_PACKET_SIZE_INCLUDED)) {
_optionMaxSize = (int)DataHelper.fromLong(buffer, cur, 2);
cur += 2;
}
if (isFlagSet(FLAG_SIGNATURE_INCLUDED)) {
_optionSignature = new Signature();
byte buf[] = new byte[Signature.SIGNATURE_BYTES];
System.arraycopy(buffer, cur, buf, 0, Signature.SIGNATURE_BYTES);
_optionSignature.setData(buf);
cur += Signature.SIGNATURE_BYTES;
}
}
/**
* Determine whether the signature on the data is valid.
*
* @return true if the signature exists and validates against the data,
* false otherwise.
*/
public boolean verifySignature(I2PAppContext ctx, Destination from, byte buffer[]) {
if (!isFlagSet(FLAG_SIGNATURE_INCLUDED)) return false;
if (_optionSignature == null) return false;
int size = writePacket(buffer, 0, false);
return ctx.dsa().verifySignature(_optionSignature, buffer, 0, size, from.getSigningPublicKey());
}
/**
* Sign and write the packet to the buffer (starting at the offset) and return
* the number of bytes written.
*
* @throws IllegalStateException if there is data missing or otherwise b0rked
*/
public int writeSignedPacket(byte buffer[], int offset, I2PAppContext ctx, SigningPrivateKey key) throws IllegalStateException {
setFlag(FLAG_SIGNATURE_INCLUDED);
int size = writePacket(buffer, offset, false);
_optionSignature = ctx.dsa().sign(buffer, offset, size, key);
// jump into the signed data and inject the signature where we
// previously placed a bunch of zeroes
int signatureOffset = offset
+ 4 // sendStreamId
+ 4 // receiveStreamId
+ 4 // sequenceNum
+ 4 // ackThrough
+ (_nacks != null ? 4*_nacks.length + 1 : 1)
+ 1 // resendDelay
+ 2 // flags
+ 2 // optionSize
+ (isFlagSet(FLAG_DELAY_REQUESTED) ? 1 : 0)
+ (isFlagSet(FLAG_FROM_INCLUDED) ? _optionFrom.size() : 0)
+ (isFlagSet(FLAG_MAX_PACKET_SIZE_INCLUDED) ? 2 : 0);
System.arraycopy(_optionSignature.getData(), 0, buffer, signatureOffset, Signature.SIGNATURE_BYTES);
return size;
}
}
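
For illustration only: a round trip through writePacket() and readPacket() using only the fields shown above. The sketch skips the signature and FROM options that a real synchronize packet would carry per the javadoc, and the 1 KB buffer size is arbitrary.

import net.i2p.client.streaming.Packet;

/** Sketch: serialize a bare synchronize packet and parse it back. */
public class PacketRoundTripSketch {
    public static void main(String args[]) {
        Packet out = new Packet();
        out.setSendStreamId(new byte[] { 0x00, 0x00, 0x00, 0x01 });
        out.setReceiveStreamId(Packet.RECEIVE_STREAM_ID_UNKNOWN); // remote id not known yet
        out.setSequenceNum(0);
        out.setAckThrough(0);
        out.setFlag(Packet.FLAG_SYNCHRONIZE);
        out.setPayload("hello".getBytes());

        byte buf[] = new byte[1024];
        int len = out.writePacket(buf, 0);

        Packet in = new Packet();
        in.readPacket(buf, 0, len);
        System.out.println("seq=" + in.getSequenceNum()
                           + " syn? " + in.isFlagSet(Packet.FLAG_SYNCHRONIZE)
                           + " payload=" + new String(in.getPayload()));
    }
}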

View File

@@ -0,0 +1,90 @@
package net.i2p.client.streaming;
import java.io.InputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import net.i2p.I2PAppContext;
import net.i2p.data.DataHelper;
import net.i2p.util.Log;
/**
* Stress test the MessageInputStream
*/
public class MessageInputStreamTest {
private I2PAppContext _context;
private Log _log;
public MessageInputStreamTest() {
_context = I2PAppContext.getGlobalContext();
_log = _context.logManager().getLog(MessageInputStreamTest.class);
}
public void testInOrder() {
byte orig[] = new byte[32*1024];
_context.random().nextBytes(orig);
MessageInputStream in = new MessageInputStream();
for (int i = 0; i < 32; i++) {
byte msg[] = new byte[1024];
System.arraycopy(orig, i*1024, msg, 0, 1024);
in.messageReceived(i, msg);
}
byte read[] = new byte[32*1024];
try {
int howMany = DataHelper.read(in, read);
if (howMany != orig.length)
throw new RuntimeException("Failed test: not enough bytes read [" + howMany + "]");
if (!DataHelper.eq(orig, read))
throw new RuntimeException("Failed test: data read is not equal");
_log.info("Passed test: in order");
} catch (IOException ioe) {
throw new RuntimeException("IOError reading: " + ioe.getMessage());
}
}
public void testRandomOrder() {
byte orig[] = new byte[32*1024];
_context.random().nextBytes(orig);
MessageInputStream in = new MessageInputStream();
ArrayList order = new ArrayList(32);
for (int i = 0; i < 32; i++)
order.add(new Integer(i));
Collections.shuffle(order);
for (int i = 0; i < 32; i++) {
byte msg[] = new byte[1024];
Integer cur = (Integer)order.get(i);
System.arraycopy(orig, cur.intValue()*1024, msg, 0, 1024);
in.messageReceived(cur.intValue(), msg);
_log.debug("Injecting " + cur);
}
byte read[] = new byte[32*1024];
try {
int howMany = DataHelper.read(in, read);
if (howMany != orig.length)
throw new RuntimeException("Failed test: not enough bytes read [" + howMany + "]");
if (!DataHelper.eq(orig, read))
throw new RuntimeException("Failed test: data read is not equal");
_log.info("Passed test: random order");
} catch (IOException ioe) {
throw new RuntimeException("IOError reading: " + ioe.getMessage());
}
}
public static void main(String args[]) {
MessageInputStreamTest t = new MessageInputStreamTest();
try {
t.testInOrder();
t.testRandomOrder();
} catch (Exception e) {
e.printStackTrace();
}
try { Thread.sleep(10*1000); } catch (InterruptedException ie) {}
}
}

View File

@@ -14,8 +14,8 @@ package net.i2p;
*
*/
public class CoreVersion {
public final static String ID = "$Revision: 1.22 $ $Date: 2004/10/01 12:23:00 $";
public final static String VERSION = "0.4.1.2";
public final static String ID = "$Revision: 1.23 $ $Date: 2004/10/10 14:33:08 $";
public final static String VERSION = "0.4.1.3";
public static void main(String args[]) {
System.out.println("I2P Core version: " + VERSION);

View File

@@ -38,6 +38,7 @@ public interface I2PSession {
* @return whether it was accepted by the router for delivery or not
*/
public boolean sendMessage(Destination dest, byte[] payload) throws I2PSessionException;
public boolean sendMessage(Destination dest, byte[] payload, int offset, int size) throws I2PSessionException;
/**
* Like sendMessage above, except the key used and the tags sent are exposed to the
@@ -66,8 +67,8 @@ public interface I2PSession {
* the contents of the set is ignored during the call, but afterwards it contains a set of SessionTag
* objects that were sent along side the given keyUsed.
*/
public boolean sendMessage(Destination dest, byte[] payload, SessionKey keyUsed, Set tagsSent)
throws I2PSessionException;
public boolean sendMessage(Destination dest, byte[] payload, SessionKey keyUsed, Set tagsSent) throws I2PSessionException;
public boolean sendMessage(Destination dest, byte[] payload, int offset, int size, SessionKey keyUsed, Set tagsSent) throws I2PSessionException;
/** Receive a message that the router has notified the client about, returning
* the payload.
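
For illustration only: the offset/size overload added above lets a caller send a slice of a larger buffer without copying it out first (the "section of a byte array" item in the 0.4.1.3 notes). The session and destination are assumed to come from elsewhere, and the packages are assumed to be the usual net.i2p.client and net.i2p.data.

import net.i2p.client.I2PSession;
import net.i2p.client.I2PSessionException;
import net.i2p.data.Destination;

/** Sketch: send only bytes [off, off+size) of buf. */
class PartialSendSketch {
    static boolean sendSlice(I2PSession session, Destination dest,
                             byte buf[], int off, int size) throws I2PSessionException {
        // no temporary copy of the slice is needed
        return session.sendMessage(dest, buf, off, size);
    }
}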

View File

@@ -62,13 +62,21 @@ class I2PSessionImpl2 extends I2PSessionImpl {
}
public boolean sendMessage(Destination dest, byte[] payload) throws I2PSessionException {
return sendMessage(dest, payload, new SessionKey(), new HashSet(64));
return sendMessage(dest, payload, 0, payload.length);
}
public boolean sendMessage(Destination dest, byte[] payload, int offset, int size) throws I2PSessionException {
return sendMessage(dest, payload, offset, size, new SessionKey(), new HashSet(64));
}
public boolean sendMessage(Destination dest, byte[] payload, SessionKey keyUsed, Set tagsSent)
public boolean sendMessage(Destination dest, byte[] payload, SessionKey keyUsed, Set tagsSent) throws I2PSessionException {
return sendMessage(dest, payload, 0, payload.length, keyUsed, tagsSent);
}
public boolean sendMessage(Destination dest, byte[] payload, int offset, int size, SessionKey keyUsed, Set tagsSent)
throws I2PSessionException {
if (isClosed()) throw new I2PSessionException("Already closed");
if (SHOULD_COMPRESS) payload = DataHelper.compress(payload);
if (SHOULD_COMPRESS) payload = DataHelper.compress(payload, offset, size);
else throw new IllegalStateException("we need to update sendGuaranteed to support partial send");
// we always send as guaranteed (so we get the session keys/tags acked),
// but only block until the appropriate event has been reached (guaranteed
// success or accepted). we may want to break this out into a seperate

View File

@@ -50,8 +50,10 @@ public class DSAEngine {
public static DSAEngine getInstance() {
return I2PAppContext.getGlobalContext().dsa();
}
public boolean verifySignature(Signature signature, byte signedData[], SigningPublicKey verifyingKey) {
return verifySignature(signature, signedData, 0, signedData.length, verifyingKey);
}
public boolean verifySignature(Signature signature, byte signedData[], int offset, int size, SigningPublicKey verifyingKey) {
long start = _context.clock().now();
byte[] sigbytes = signature.getData();
@@ -68,7 +70,7 @@ public class DSAEngine {
BigInteger r = new NativeBigInteger(1, rbytes);
BigInteger y = new NativeBigInteger(1, verifyingKey.getData());
BigInteger w = s.modInverse(CryptoConstants.dsaq);
byte data[] = calculateHash(signedData).getData();
byte data[] = calculateHash(signedData, offset, size).getData();
NativeBigInteger bi = new NativeBigInteger(1, data);
BigInteger u1 = bi.multiply(w).mod(CryptoConstants.dsaq);
BigInteger u2 = r.multiply(w).mod(CryptoConstants.dsaq);
@@ -88,6 +90,9 @@ public class DSAEngine {
}
public Signature sign(byte data[], SigningPrivateKey signingKey) {
return sign(data, 0, data.length, signingKey);
}
public Signature sign(byte data[], int offset, int length, SigningPrivateKey signingKey) {
if ((signingKey == null) || (data == null) || (data.length <= 0)) return null;
long start = _context.clock().now();
@@ -100,7 +105,7 @@ public class DSAEngine {
BigInteger r = CryptoConstants.dsag.modPow(k, CryptoConstants.dsap).mod(CryptoConstants.dsaq);
BigInteger kinv = k.modInverse(CryptoConstants.dsaq);
Hash h = calculateHash(data);
Hash h = calculateHash(data, offset, length);
if (h == null) return null;
@@ -150,42 +155,42 @@ public class DSAEngine {
private int[] H0 = { 0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476, 0xc3d2e1f0};
private Hash calculateHash(byte[] source) {
long length = source.length * 8;
private Hash calculateHash(byte[] source, int offset, int len) {
long length = len * 8;
int k = 448 - (int) ((length + 1) % 512);
if (k < 0) {
k += 512;
}
int padbytes = k / 8;
int wordlength = source.length / 4 + padbytes / 4 + 3;
int wordlength = len / 4 + padbytes / 4 + 3;
int[] M0 = new int[wordlength];
int wordcount = 0;
int x = 0;
for (x = 0; x < (source.length / 4) * 4; x += 4) {
M0[wordcount] = source[x] << 24 >>> 24 << 24;
M0[wordcount] |= source[x + 1] << 24 >>> 24 << 16;
M0[wordcount] |= source[x + 2] << 24 >>> 24 << 8;
M0[wordcount] |= source[x + 3] << 24 >>> 24 << 0;
for (x = 0; x < (len / 4) * 4; x += 4) {
M0[wordcount] = source[offset + x] << 24 >>> 24 << 24;
M0[wordcount] |= source[offset + x + 1] << 24 >>> 24 << 16;
M0[wordcount] |= source[offset + x + 2] << 24 >>> 24 << 8;
M0[wordcount] |= source[offset + x + 3] << 24 >>> 24 << 0;
wordcount++;
}
switch (source.length - (wordcount + 1) * 4 + 4) {
switch (len - (wordcount + 1) * 4 + 4) {
case 0:
M0[wordcount] |= 0x80000000;
break;
case 1:
M0[wordcount] = source[x] << 24 >>> 24 << 24;
M0[wordcount] = source[offset + x] << 24 >>> 24 << 24;
M0[wordcount] |= 0x00800000;
break;
case 2:
M0[wordcount] = source[x] << 24 >>> 24 << 24;
M0[wordcount] |= source[x + 1] << 24 >>> 24 << 16;
M0[wordcount] = source[offset + x] << 24 >>> 24 << 24;
M0[wordcount] |= source[offset + x + 1] << 24 >>> 24 << 16;
M0[wordcount] |= 0x00008000;
break;
case 3:
M0[wordcount] = source[x] << 24 >>> 24 << 24;
M0[wordcount] |= source[x + 1] << 24 >>> 24 << 16;
M0[wordcount] |= source[x + 2] << 24 >>> 24 << 8;
M0[wordcount] = source[offset + x] << 24 >>> 24 << 24;
M0[wordcount] |= source[offset + x + 1] << 24 >>> 24 << 16;
M0[wordcount] |= source[offset + x + 2] << 24 >>> 24 << 8;
M0[wordcount] |= 0x00000080;
break;
}
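
For illustration only: the new offset/length overloads hash only the requested window, so a region of a larger buffer can be signed and verified in place. The key pair is assumed to be supplied by the caller.

import net.i2p.I2PAppContext;
import net.i2p.data.Signature;
import net.i2p.data.SigningPrivateKey;
import net.i2p.data.SigningPublicKey;

/** Sketch: sign and verify data[off..off+len) without copying it out. */
class PartialSignSketch {
    static boolean signAndVerify(byte data[], int off, int len,
                                 SigningPrivateKey priv, SigningPublicKey pub) {
        I2PAppContext ctx = I2PAppContext.getGlobalContext();
        Signature sig = ctx.dsa().sign(data, off, len, priv);
        if (sig == null) return false;
        return ctx.dsa().verifySignature(sig, data, off, len, pub);
    }
}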

View File

@@ -86,6 +86,41 @@ public class Certificate extends DataStructureImpl {
DataHelper.writeLong(out, 2, 0L);
}
}
public int writeBytes(byte target[], int offset) {
int cur = offset;
DataHelper.toLong(target, cur, 1, _type);
cur++;
if (_payload != null) {
DataHelper.toLong(target, cur, 2, _payload.length);
cur += 2;
System.arraycopy(_payload, 0, target, cur, _payload.length);
cur += _payload.length;
} else {
DataHelper.toLong(target, cur, 2, 0);
cur += 2;
}
return cur - offset;
}
public int readBytes(byte source[], int offset) {
int cur = offset;
_type = (int)DataHelper.fromLong(source, cur, 1);
cur++;
int length = (int)DataHelper.fromLong(source, cur, 2);
cur += 2;
if (length > 0) {
_payload = new byte[length];
System.arraycopy(source, cur, _payload, 0, length);
cur += length;
}
return cur - offset;
}
public int size() {
return 1 + 2 + (_payload != null ? _payload.length : 0);
}
public boolean equals(Object object) {
if ((object == null) || !(object instanceof Certificate)) return false;
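
For illustration only: the array form added above is one byte of type, a two byte length, then the payload, so a certificate with no payload occupies three bytes. A tiny parse of a hand-built buffer (assuming a default-constructed Certificate starts empty):

import net.i2p.data.Certificate;

/** Sketch: parse the 1-byte-type / 2-byte-length / payload wire form. */
class CertificateParseSketch {
    public static void main(String args[]) {
        byte wire[] = new byte[] { 0x00, 0x00, 0x00 };   // type 0, zero-length payload
        Certificate cert = new Certificate();
        int consumed = cert.readBytes(wire, 0);
        System.out.println("consumed " + consumed + " bytes, size()=" + cert.size());   // 3 and 3
    }
}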

View File

@@ -263,12 +263,16 @@ public class DataHelper {
}
public static byte[] toLong(int numBytes, long value) throws IllegalArgumentException {
byte val[] = new byte[numBytes];
toLong(val, 0, numBytes, value);
return val;
}
public static void toLong(byte target[], int offset, int numBytes, long value) throws IllegalArgumentException {
if (numBytes <= 0) throw new IllegalArgumentException("Invalid number of bytes");
if (value < 0) throw new IllegalArgumentException("Negative value not allowed");
byte val[] = new byte[numBytes];
for (int i = 0; i < numBytes; i++)
val[numBytes-i-1] = (byte)(value >>> (i*8));
return val;
target[offset+numBytes-i-1] = (byte)(value >>> (i*8));
}
public static long fromLong(byte src[], int offset, int numBytes) {
@@ -678,11 +682,14 @@ public class DataHelper {
/** compress the data and return a new GZIP compressed array */
public static byte[] compress(byte orig[]) {
return compress(orig, 0, orig.length);
}
public static byte[] compress(byte orig[], int offset, int size) {
if ((orig == null) || (orig.length <= 0)) return orig;
try {
ByteArrayOutputStream baos = new ByteArrayOutputStream(orig.length);
GZIPOutputStream out = new GZIPOutputStream(baos, orig.length);
out.write(orig);
ByteArrayOutputStream baos = new ByteArrayOutputStream(size);
GZIPOutputStream out = new GZIPOutputStream(baos, size);
out.write(orig, offset, size);
out.finish();
out.flush();
byte rv[] = baos.toByteArray();
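
For illustration only: the in-place toLong() added above writes the unsigned value big-endian into an existing buffer at an offset, mirroring the fromLong() used throughout the streaming code. A tiny round trip:

import net.i2p.data.DataHelper;

/** Sketch: write four big-endian bytes into the middle of a buffer and read them back. */
class ToLongSketch {
    public static void main(String args[]) {
        byte buf[] = new byte[8];
        DataHelper.toLong(buf, 2, 4, 0x01020304L);
        long back = DataHelper.fromLong(buf, 2, 4);
        System.out.println(Long.toHexString(back));      // 1020304
        System.out.println(buf[2] + " " + buf[5]);       // 1 4 (most significant byte first)
    }
}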

View File

@@ -79,7 +79,41 @@ public class Destination extends DataStructureImpl {
_signingKey.writeBytes(out);
_certificate.writeBytes(out);
}
public int writeBytes(byte target[], int offset) {
int cur = offset;
System.arraycopy(_publicKey.getData(), 0, target, cur, PublicKey.KEYSIZE_BYTES);
cur += PublicKey.KEYSIZE_BYTES;
System.arraycopy(_signingKey.getData(), 0, target, cur, SigningPublicKey.KEYSIZE_BYTES);
cur += SigningPublicKey.KEYSIZE_BYTES;
cur += _certificate.writeBytes(target, cur);
return cur - offset;
}
public int readBytes(byte source[], int offset) {
int cur = offset;
_publicKey = new PublicKey();
byte buf[] = new byte[PublicKey.KEYSIZE_BYTES];
System.arraycopy(source, cur, buf, 0, PublicKey.KEYSIZE_BYTES);
_publicKey.setData(buf);
cur += PublicKey.KEYSIZE_BYTES;
_signingKey = new SigningPublicKey();
buf = new byte[SigningPublicKey.KEYSIZE_BYTES];
System.arraycopy(source, cur, buf, 0, SigningPublicKey.KEYSIZE_BYTES);
_signingKey.setData(buf);
cur += SigningPublicKey.KEYSIZE_BYTES;
_certificate = new Certificate();
cur += _certificate.readBytes(source, cur);
return cur - offset;
}
public int size() {
return PublicKey.KEYSIZE_BYTES + SigningPublicKey.KEYSIZE_BYTES + _certificate.size();
}
public boolean equals(Object object) {
if ((object == null) || !(object instanceof Destination)) return false;
Destination dst = (Destination) object;

View File

@@ -2,6 +2,7 @@ package net.i2p.util;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
@@ -18,10 +19,14 @@ public class SimpleTimer {
private static final SimpleTimer _instance = new SimpleTimer();
public static SimpleTimer getInstance() { return _instance; }
private Log _log;
/** event time (Long) to event (TimedEvent) mapping */
private Map _events;
/** event (TimedEvent) to event time (Long) mapping */
private Map _eventTimes;
private SimpleTimer() {
_events = new TreeMap();
_eventTimes = new HashMap();
I2PThread runner = new I2PThread(new SimpleTimerRunner());
runner.setName("SimpleTimer");
runner.setDaemon(true);
@@ -35,9 +40,13 @@ public class SimpleTimer {
public void addEvent(TimedEvent event, long timeoutMs) {
long eventTime = System.currentTimeMillis() + timeoutMs;
synchronized (_events) {
// remove the old scheduled position, then reinsert it
if (_eventTimes.containsKey(event))
_events.remove(_eventTimes.get(event));
while (_events.containsKey(new Long(eventTime)))
eventTime++;
_events.put(new Long(eventTime), event);
_eventTimes.put(event, new Long(eventTime));
_events.notifyAll();
}
}
@@ -87,6 +96,8 @@ public class SimpleTimer {
if (timesToRemove.size() > 0) {
for (int i = 0; i < timesToRemove.size(); i++)
_events.remove(timesToRemove.get(i));
for (int i = 0; i < eventsToFire.size(); i++)
_eventTimes.remove(eventsToFire.get(i));
} else {
if (nextEventDelay != -1)
_events.wait(nextEventDelay);
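
For illustration only: with the rescheduling support added above, queueing the same event twice leaves only the later slot, so it fires once instead of twice. The sketch assumes SimpleTimer.TimedEvent exposes a timeReached() callback (the interface itself is not shown in this hunk).

import net.i2p.util.SimpleTimer;

/** Sketch: the second addEvent() call replaces the first slot rather than adding another. */
class RescheduleSketch {
    public static void main(String args[]) throws InterruptedException {
        SimpleTimer.TimedEvent ping = new SimpleTimer.TimedEvent() {
            public void timeReached() { System.out.println("fired once, ~5s after scheduling"); }
        };
        SimpleTimer.getInstance().addEvent(ping, 1000);   // schedule for +1s
        SimpleTimer.getInstance().addEvent(ping, 5000);   // reschedule: the +1s slot is dropped
        Thread.sleep(6*1000);                             // the timer thread is a daemon, so wait for it
    }
}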

View File

@@ -1,4 +1,69 @@
$Id: history.txt,v 1.43 2004/10/10 09:57:15 jrandom Exp $
$Id: history.txt,v 1.53 2004/10/17 16:03:06 jrandom Exp $
* 2004-10-18 0.4.1.3 released
2004-10-18 jrandom
* Allow sending messages with a section of a byte array.
* Reduced stats published.
2004-10-17 jrandom
* Don't b0rk on whitespace in the router address.
2004-10-16 jrandom
* More aggressively reduce the capacity of peers if their tunnels are
failing so that we move off them quicker.
* Simplify some data structure serialization for reuse in the streaming
lib, as well as add support for signing and verifying partial byte
arrays.
* Logging updates
2004-10-16 jrandom
* Increased the default minimum tunnel test time to 5 seconds, since we
still see the occational message processing time spike to 2 seconds.
* Update the SimpleTimer to allow rescheduling a task thats already
queued (useful for the new streaming lib).
2004-10-15 jrandom
* Replaced old minimum tunnel test timeout of 1s with a configurable
value (router.config property "router.tunnelTestMinimum", with the
default of 2s).
2004-10-14 jrandom
* Tunnel rejection is no longer a sign of an overwhelmingly loaded
peer, so don't use it as a key point of the IsFailing calculator.
We still use it as a key point of the Capacity calculator, however.
2004-10-14 jrandom
* Allow for a configurable tunnel "growth factor", rather than trying
to achieve a steady state. This will let us grow gradually when
the router is needed more, rather than blindly accepting the request
or arbitrarily choking it at an averaged value. Configure this with
"router.tunnelGrowthFactor" in the router.config (default "1.5").
* Adjust the tunnel test timeouts dynamically - rather than the old
flat 30s (!!!) timeout, we set the timeout to 2x the average tunnel
test time (the deviation factor can be adjusted by setting
"router.tunnelTestDeviation" to "3.0" or whatever). This should help
find the 'good' tunnels.
* Added some crazy debugging to try and track down an intermittent hang.
2004-10-13 jrandom
* Fix the probabalistic tunnel reject (we always accepted everything,
since the docs on java.util.Random.nextDouble() are wrong..)
* Fixed a race on startup (thanks Quadn!)
2004-10-12 jrandom
* Disable the probabalistic drop by default (enable via the router config
property "tcp.dropProbabalistically=true")
* Disable the actual watchdog shutdown by default, but keep track of more
variables and log a lot more when it occurs (enable via the router
config property "watchdog.haltOnHang=true")
* Implement some tunnel participation smoothing by refusing requests
probabalistically as our participating tunnel count exceeds the previous
hour's, or when the 10 minute average tunnel test time exceeds the 60
minute average tunnel test time. The probabilities in both cases are
oldAverage / #current, so if you're suddenly flooded with 200 tunnels
and you had previously only participated in 50, you'll have a 25% chance
of accepting a subsequent request.
* 2004-10-10 0.4.1.2 released
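
For illustration only: the 2004-10-12 entry above gives the tunnel accept probability as oldAverage / #current, so 50 previously participating tunnels against 200 current ones yields a 25% chance per request. A minimal sketch of that calculation, with hypothetical variable names:

import java.util.Random;

/** Sketch of the tunnel participation smoothing probability described above. */
class TunnelAcceptSketch {
    static boolean acceptTunnelRequest(Random rnd, int previousHourCount, int currentCount) {
        if (currentCount <= previousHourCount) return true;      // not above last hour's level
        double p = ((double)previousHourCount) / currentCount;   // e.g. 50 / 200 = 0.25
        return rnd.nextDouble() < p;
    }
}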

View File

@@ -1,6 +1,14 @@
; TC's hosts.txt guaranteed freshness
; $Id: hosts.txt,v 1.56 2004/10/05 10:57:08 jrandom Exp $
; $Id: hosts.txt,v 1.64 2004/10/17 16:39:06 jrandom Exp $
; changelog:
; (1.87) updated curiosity.i2p (via signed email)
; (1.86) added blueheron.i2p
; (1.85) updated files.i2p (after verifying ident)
; (1.84) added utansans.i2p
; (1.83) added irc.orz.i2p
; (1.82) added nano.i2p
; (1.81) added ragnarok.i2p
; (1.80) added marcos.i2p
; (1.79) updated beyond.i2p's key
; (1.78) added b.i2p
; (1.77) added tinyurl.i2p
@@ -162,14 +170,14 @@ cvs.i2p=dIvSbo35u0bc-5Vp0Wqs4YvarZS6l-y-URwOgH~NrRfQJ6mQmnfcmBJ1XPWswSHEGeRchK8l
linuxagent.i2p=OE-WAF6jzHUbUF7eytgiIaIKWh~oGf3c8~q-fUBbM2WntbXO7AhpU0QmZ~1lZIiQfzO-2Xyr0e-2VQBNexw73iOPLDII7mLv6xUnZmaKm-QPdvDEI9-ILPWgpMGJubAgA4ZmDgIUg2dSQ19Eq8lp5bwkuIUuU8Jd6JS1fvX-87G7PjcrE7vKM-Ty2gCnp7cWsadl8qYEkXLf5-CIr3LjGM-B-G~NoIqPf56df9ayHVguqT1f0c-d00qY80kUvb8DJ-X6VR9TNut6dKqRajI0earbkALP~wdoFggIIebqm1UXeI1tYROcrb155z4qu-4Nb~q4wzNAI9AVjNboIk50P2X0TrLNmjL9RWCrvHCt-r2ZV35Skpka4YmxATU59jSPw8jhXw1TgM-5DRswEtmOdxJMpkQU~iu~TJWmmcVvpkqyNMqXiqtOKqEgLTj8gOfNaITH0kdMVvXPKmYPTFimWu64T0VGBsqS~PQ50JxtteUP5FZv-dXluL8EbhtwQ7mLAAAA
gott.i2p=BxciUZkGQwi6Sj1Ucg5GGagDdwujlj8ClePa4~3d-1nnRJsBhTNJtvs-UuQYY77gPGNlpNk9dt02mxeS7f~pEC4E1KxJH9mhnf0OlIGB4hOOhDlXokAaKE2u1E-vVCDJlZCq32r~Ne53w-L2m5h5FAq5Bx7NXrTzWAEPgAlC8A2wASJWIF-EKOX5kfkkYoF6sKZqam5CxAAAMAUwmnbD--7wo8d2mz0C-c~oE7hzuHsg2J8yME5Zd~-FOZUxkoNCBJdfrVlVRn9~6685zqpaotL-SIqFK6~28ZCNMNCtnZcZZLeG2gZxq2pe8HtgzDrMMSx45vs3JLUp8Bap6~D9oFX0o7k2DQm-5R9D5NWhsJgol5Ny~8ABTXBpW~WQQOuKxE5xJauXM5rQia2yyXgldfOyGjxYnkrTGu6LaxlLmhyAva3ZbOO0mtLvrKLZebLKCYUxP7~TtNmSWEAzPKFYbjdZ~NjE0q4TanLFBaWotefPmy3IuAc8Mr7PbCvi4GmdAAAA
beyond.i2p=Gy9wxnDgcaNO8b-1gZTAn4GKWiMZHLSuOpXF9SSrVVO5Gx791OwZVNg9mMHvIkSekWuqtJwIRCArfXtaQNNe4ZJfl2bVNEKJonfXM9HLbNoJiSgmg-x75xYLXb04NDYhIbHJ4zSEpDFufjk1kI35XJo~32~qYynDMbYX9b3QtNfFfcBrOzH5iObTsMBBe5f2zEpjl1UWNGuKYa1tVHef65z6WCr16TUYSj5uDnUyTWWkwrgDZbTnRRlz--BQ9y4WiCgzQyMvRaNU-T6aPj6P9HlQIhV7efttpb2IRLpSKepIYG66ohBhI1n2G2dd18UHdR~PWNIy8x7AsCczlqnfh4lcJdiQqrNHQtpOI3XfM5gLM5sKLwrxbsQwB-6dnhonztTd1o9TWhTuA1FSlk3nRQ22E1~pQRh0sexgWjk1oa0Qr6kOojkY955aL6ZnD7RJx3fbeZ9JAy6s5uxYXuE1wkGQNUVEvi15jgVB8zk4GaB8ME6tdWVtcBTMjEqvulyEAAAA
files.i2p=1rI0OWp83VhVRyMiZkPmw~mEo-CVu548n4uJLsHJW4eNzQfQ~k8SSFGCMc0-YmOW8zvB1AmO1~hTy9qrT4-5rkVxvy7XTwK9-Mvb8N9fKvxr89yBitQvBwhsRKNScMBcV-qsauGd1F~wV~eb0BFjfZG2pyB~-AYuqZrqmoEf48Q~aXn5IKnIXkb8RmKbJNAws3opW3Tvbb-cHMhth3noc9QEpCJzzPZ6~hjjh0FTcPFiFAgaRIL81YcSfMTG7eTg2X20~5j4Em4GY6n8zHw6S5hbBuSMp~udmz11Bvc3QMcbfPMYhMxGMaqWUN8yN3kJwjk8griosz0o6XE~NcUvVXKZELW7jNmURd5e2a-zkSwjlOzoRi3OQhdeGypbUM0c-0-2OxOMJM~yw0bRingvRHT02Xr11nhyZRbysSMxFfxMY5doNKvJk~H7XhCaXCn0LtP7pd9JU9NPKzxIqbP8W0dZFP8C217yRmAHTfxbMmuYaQZenEkWmUVqlTyO-4NBAAAA
files.i2p=aEKZJq-9MfKzgKfxERC2Joj1ar-0c6dTrcLvwg4RVMI5uGCYfCSCRTtgFcwvEpVfTQ5RsGIxtPorEvbLjiY5TJJwtQj~ZWe3o2asm7Cwp-nmegXdAFyUSAKI33XUDvtVZjx2Erc~eGa39sIJrtJnfvapfRqmq1Xh9dcTMzEs9Aaz7cnMumTi6p5w-bBsQpQR3IBzsGkfmsUFaA4nwodz6ZhBs1Fqls7WrFLAlUz9hiKwtV-MrFy5RU5IH7elPkcN~5ReIDVBZ4axJZfMBOyszt3h6gmjyMiiaQ7keks8KDqFbQRZKUS8aRPCIzB90bzxbWZRO4gu0ApMeOd~tuIH71zYZ0UqdLOWo67iZhFz~njriskKH~1~lk--5Mk6487POUcY740ZC3FJl~6VXjf9Pv1n08Usf2Z3rK2i9QbwENsn9eH4T5bP5-JNDL8c3IZAHPo93W27gsNPAQkWrKC~Fvk9KK8jATpEBl2e5iZVUmwA07xLw~XJrFKQwaLS-KpPAAAA
sugadude.i2p=PIV8bZaIH4wRqzlzVLX1mDkELhLFdyLlKxcd2nUrJvd0YeTByls6LGFZ7wr0-9MT7cb3Ee23U1BApgm0~jT5bzFGIQOvx7AT8o0uMy~fcP1lhzo4lNwD5bHnf9OXe59FzORC3s79bwFmTtMCrEoYvQXCGherxwFfXywRPrJ2x0AjfjungiwX3vrndMdol1SDWTLdxTSARLPgLmswz47gGm4dOipjHBSGOm5bdgZBD6di8YGxHrB072QfXxBVSGzTgtQNnK4qAsZv1YbETk2CQ1nOKpCj0Zjkcnc1trTFOpUQKeOp6kiGCriAn8Ezbu1yuf2fiDAmPenFf8bDR7fNFR7kMDcKsbiht7fFi0wjPdE4Lx5H2PFyTuiw3~-osNsXFLtjvaeIV4P99vZRof1avADgp5k4pC8fEPKWfwXGPPFJjYEbWPE4VxmxcP93Iq-CMZfSd8fPeZNeBz8qL~n9IXJoXjO0G3nouQ7d1ZF4okVRO8FmZ91Umpy79v2I4sYFAAAA
socks1.tor.i2p=sl3oda7CJe3TN1JVoALIuoRAe--qYgDwo0A8PrUGaaWQ2xXiWYaTV7~FQOmv13JRCtbO-8rhOfGWnV-qia4CtRxH6nR6fytW~uJKRP4hbjhfvBuS1QdX2GwWlbgJdV8VT3OpJAe5iuKYCFnnSbVG2VZajtsoKJxHY748h1zhIh4YaMBGqoodQJrnvYWd4HJJ-ZauE-s-K2zWOTJ-EKZNp9UZr5DWDV3P5JTSruc7vyKiDVFra~-JT8mjnWBE9GKy1HiJKAe2UyKHBVjvJ9iL4bJ3HLH-z3oIP5KmW7s4czTq2fdO7yhssLMKxaGqhzsNViWv8FtvLJtHkTsprVQBeS2Eeqiw3iwcC4DGP4dUkj2Ex7DPqvxlA58Df0INgNz-kUPuURf0ZSK45v-s7F0f03BbwIu5eErLZhQckxs9tX6twSPXk2RaceOMA0-eKlbCh3FcpbFbfcimjOTcmpeUN7s-7C-XfM1vazIaIpTADwzZ9Ocs5TV~CU8TPyxZkx8FAAAA
eepdot.i2p=IXWmPK6689w4d2j-B-zQRdgG6md6UdvDZz9e3MnEWaoU7SiHk8Yrw4-2MSavhycFDkQNdpdJgYVhK0Ve8aF4TNf88KdI5SY4nl079CrjhTaNoQiUxOTJIUzTWGrDmNITfoV7PAjGWoMjw5dOcbK~PMpMTlOx9w8CtFEHU2cG4Gdcpu5NFpv6a5FWZNO-GRYrWfW1laZBviaE1Twwva-thbY~G5un9JFAoQZ88mS2JRKrxFuy1P1xCEEEiLvJfOcHVdL-0ebQV5q5Cwlyvrkki1se4URzlgF-FjQjTiaS0kbtHv6NkG-R19poUQu-ktMdMdDf2RLFBJPtyTvbyteyeXnI4LjGxSzQ8eH3txelIZKXwCRPYFNqutAmoQMqLf~NLDPtIq3QGp798sBbt3nooOKGEk3RVcJDwEzx~t3Qj6DWEA0xwSm56YYG84EYT2itIYGH8J4epFjGdV3zfWiME~QHLKfAwwkY-JdCiaDTk7RPfY1jkBw~SyI1jTYrX-PgAAAA
modulus.i2p=HL47CwJWiovjoHMTK~2tK6~sYEghJapUNkeqOdnkWFsf985N8eCpRUQtz7fFJZrHCTHjqeO5hxvcFye~Jjtf~id7pFlaotkpmbnx75x5HCiTIBLTUfbPurjhTXNK3A0JFxQ5lmYd8S5WqNykdaZGHjhfDSC~PUcfJA398S611e2D5se1LKAp0qFFhbNi69Unovj7HdXYJLiATDhewt7tY6mMa-p3BeTcvu26MhET4itYBhzp12AKiqWrEQM-4tpCmK5K7gxECHOTuMtZu7SEVc4CYAgBIsILtT0YA84441o8h0kpsKGDPFuszMWtz5XSIFlO7mwpu6PCmBx6FZbaZFSQ8CghxqhrWGnGIno-Lr9vKeTpD-AT7rqwuUrZHfd3yhGhOA7i0nM6Uh~qXbvFkzK83kyyxbX4InjAZ4g897LixiCc2qj6~eeh1c9oNcZ01-ho5U7lXUo5WrfG1-N7DeH2xkWGzFrjlZ3dXP8w~f7osxtth596Jz~cY8xNacMuAAAA
xolotl.i2p=byBX2FzIeKKRoUStOzidJ6jni4dkWBNK4x3khxgNo8NlJ8HqLl2MlgxlRsvhTHQy5L1GvTC9DYpnW0tE22Kl9QkPIGPikBLvwePT82gsRCxXOJ6CF8JiK0h9~giTOrRmguIo7d8DpDF3Sw7a9VVJf~9Dxs4IMCMPre36rPdmqkYSTZcfITfWCG1M1xnxQz9OuB6YKnnYcLvccSAcOTrWFEeiPHC0CsAo1EmJekmEGCK3lkNBRomfw05uhLqA4mFQgZbvHfSDhDv1eEUeQq8YBk4-vi4AsRjdmgLL4yduqoYnLgTYL6OvWuJ60EdxPkuZUzW8~5MGZfwKQLc4G0YmAjgLlQi1sdXT4HFFLNRvG2GO956Eay9Z3eLO2K7EvxY0C9MQ5n0LullF5NJD-V509h7kBPuItUC0haicVHXXwcXW-Lx36biz8apA9D24vMWvq4KLystsrtMetH1q1oyS4jA5BXWf~6FA~eCoZrX4lgnHJ6MHPCEmlUcQPrEvApIyAAAA
freeciv.nightblade.i2p=mDlAeW1BD7ictjta8G0YwebMBmATFIWVXrVF6gpm3A13NMi1LIaq5da~zuQBWD3BLYmblE-3-U4snRHttVJBr-e9jAvy7ZBskGqie30JbzPbTBzLJWqdpaKzahI-eiezoDgVyhOstCBbjFc3CW~ckiLfj4MVS2nCV7qgujSWHho8mspb8lS0uJJk0XQImYx4QWY7KFCz2CaHY8JX~m05~81hTdJLZUjhDVBXsjj0wW-lE8mDZckiCn7ZaRSASBhFHiZnB6aGdY32Ltad6SjH0LngOcVbR43DYSHODabD6igb90c00wruQRMFarQSBBAk31uWnjftggllcA0X77NILD0PKG8RGa2fRcqyyVveGgYWhsl8vInVI-q-xI1PWairamHkzd-du58cmxzkqYZQR~XX5tPR3lQ-avScr0HgxHkMuTZSxEh72BcQP-qw2m0mPDrE445EPw3ipk8pRQi6H8lS0kCfDi8DBEygnqoLIqu35wrpEICCyog1buvppZTfAAAA
curiosity.i2p=pka8sWaYxpm1U9gX6F7WJ56Kl12Y~Fks1w0O3DnpE2bfRUZldWpFdtnB6mHhqyinZKJWJVwVR92uFpQPdLtwjfCQtVzw7zXpndXG05QeRk4viRd6mxRua8xBgc2IFhXRXUDP3sSZT6aKFyP6S-R8yGkXID5fxrO~um5aBmHGalDohXHdPxbqrBbSWqjKHr35f7zSiJkCCRxDMGqWkrVuzoZIhJIVeRZxYaubKW9R-oJdmDqhtQeP7Au7IFzno66-fa9gsGWCWAyoJe0dk4tKGZA~Q-qlwW1I3esAnp2LPwlkrqPL-A5xqw1Gdwgh93rKGtUV41wKlhubfEiUXbkDgw6RLcpxXKbQxkJPUGum2-B9QVraAanVB1NDsW2nJvK0lb7SGgEHrENHW~Z5X8CANURb5PHR4CRcp~xwrbS3DPYAoiah7Rd7-PqcbXHxSu5jl4sVm3pZYKbo8V-PLKxIYzSWUhrmj1wGRTizeml0xvl5ysoDc5p2srCT-T4ihrKrAAAA
curiosity.i2p=7tcdVioqcmDw4nHxleQBPS8-wO4IsYwmBQbYBeLWAXQsSenRUu8epki-wEMiDkHBf5-5RuOC26vy11f3taRZdxY8zbTWpgZW0L9ABS3vjTd0jkpPjuKhAWnQGr65W3YgKLxfoGO6jpPXOj8M1Bhb2aHSjHYRTpHorzzrU2KL3RDrJTrfkPh-3zNwYYHiZhcw0usSxoH4Kmn1AwjfJtKkhjRdw3e33QwZpvHyNzK6y5bvq1q5aTP0P-zm6xzKnOI0Kss6cm6Com92Oz3Rj~6wuZNCRvcUYelneMvgR~USjd~6-0R~Yrw~NkVW1fZYPFXX0u5IinGHMKvVzyiTclBYiAxFvkeiVcnkI5AJ4tV7I8sMnKPNW0o00zdjEgzVjdnFN4HXTtQTvHeFnfUmfxPusgtm3JaQ2WVo88-Sx9VA-JS6mcvG46v3QbXK4YHPdIgCYZ0Gq1Ds0CYblOks8hnKfObhnkpEgUYSsaUOAQXHlRLh8~D3MZibNamxCy31QotcAAAA
anonymnet.i2p=Iku15m2pHtAfphzLKbhptluZuJ2FSst-b6WaiP-DWLue579sGChPuZEVxnbriFouidM4hw2Kvud1AeAYm632kDZZzIBf5EyCEdAPwc9R6cfBqe2N4hcvAXyWH9jevGMNe~NMOnjNvl8rKm2bGcEprvo1FPXKA1Ene2ATPViBVg-2xQyuZs9ZcU8rSi9~PrtG05zepAwfAGM2kIhNdF4biECXT-SLSxkU9An3GtAZjNkRMdiMPSY94mZWEalFbscuL8mlhl4xFg7XsnA1TEn6KaMnBjuf3FV9XSlXZ7pUD87BrRZYFHA0ujw5FqGu~-qLuIkbUpyhYz2rIHxNNJHm9l3T8yvumCanwr39xvganMLAD4nw~i0PKqQ5tLx0WBo64-GMz8Rbhfkhhj4a09qeR6OmDMwiyHoBgv9NDM6TCYBbLFMjf6X3wbcap4oOTNXvO3RG5T7kF-1RJ2IH6V3SwL9JpdH51fTo2WgBL9cESCvGntRM9g~EFyGyMoBAcmilAAAA
sasquotch.i2p=6Hv24DvlxKauUfLHN4cPJd13cRyFq1wqaCb95r7fCYvhdFzluuZLDBYCM3w9GM3EVyAXS8uz0BCy9ZYncWXCsFQYejkTj~pTJEo5QjM1DszuUAGvDrqlm21e-ZwzEY817dIQCp9dQShfSnP41J~jVktE3ryMmtzAnCIZ~nMUejc9wManQG4muxuyUADdIiBIp-MXOAdXeEaaP0~5WPZhf0okm6uJd~fvO4h1Sy-CQuuJzWKYzDOvHFu-~9ONYQV8EdP4wovC15uHgX79YwAd8bpNTcfzJN9ypr9cQ0zGxYzDuK9LHaSD5MXQ8Hu2RgIrKUfb~osG4MQuYalBkUr3dpLzXLEokNIPk~BI~fI9AuG27wLI47fvOUB8Zbu1J9rSgz-J-~x8Tp4UFvRKDJ2N~5rj9mMdm9iO~U8DJvkOjjRraXhwqh3YcrnhHLPia10jvu72dAGi~S4bxiHuEcDn1JZIj64Jb6TC6xfhwEn-uoE~dqXdbr0MMoVcDsOP5dPjAAAA
orz.i2p=GALU66G3Deh71g1COXAWKQvwVB2ULbf6ALTvc-vNHfZ~jkLhGpxmXOlVOu17hfWhB~whwj-~Hn7dxLgbXO1WXb-xmvKKIgxUssDosoSbM2v~LgzJq~iIkeoN9VQe4tz2pMql1-f3kV8-AHZIILR7tY61TjDLQbZSVhT0S7FIcTQulbct8wjP-p4Dvp4XUUFXJDRqIpZB9beNALEO-SYpFuadDeavDY4iUhNcERDXlj38ginXceHm5SXaiUNTr~qXzNNuR0r5FsAgjHLuy3UgnxM2aL~j7PXBieNL~mLs~YcYRYvZVtG~SXKQjUqw5iUJYYutgqHsiT7IrQKZ~FgvIYJWaZBgF-84WsfHgCw6jECkiiLgUxh676iNqMSfTkp0KMgjt~ekU9aWpZdiaD5KV2-phiuVMdEVbXCGfLnwSqWIQIrSAAXvsjL8ew7sCbtBOriGKUzeL~rBBltL6gSVdxn92fVdSz2XWGExvaT856ahgOtD8RFqCQTAUm~63xuSAAAA
@@ -184,4 +192,10 @@ detonate.i2p=6swCa-wAcpBdiCnMdObQlQ15isZTpAV8rlArR54Q5dZkALb5iMCjt0hYMoz89R9ycmt
tinyurl.i2p=mfjh9H8RPkh6vYTSpFoghxSnrOzwBeE8ri3mIKwQOak0v0382ZawEDavYKcjIGg6xqzC0KA8KcOu0h151HzHZgCq2Qbzb6VGGSyyXqtsPGxSIjifIveSkIqNZE~ThySOiWXJ9Qp1EmFe9zA2m6Ey~lZL~yD8ackLzB6VY32xQN7y8kvPSt39Boty0mPXSvS5faVAd0UkbuEHOCP~~Z-E~WB2L2-ZWUD~P3zrZK560fa9-aU8g6pCjIJ8Vqx19tbkvYA~IUPtgXsgmjX6hWLpxN1oHE18jsj1IcAu0TCfldWyOtqcGOeBo20BVLZnx7-AqS0TiZhoIj7nog3x3xoOH2w6JomdJ9AT~4Z08pz0HvymVud0PXKOU7yP~QBj1~Us05-k0F-W6SmU6906l5YfNoXACurwy0VtgTmv7oJyOWpPdhFkZ98PwPL16W4iYsvZx6lDxvCxViAkYLw-fAkmueKrn2ZBau6TlhksvZt-Fqyjs~iZ4yme1243d87IZNb5AAAA
b.i2p=b2Ol9hg3uAOK3mB7sK~ydOZQlZIcc0Cixh~M89i71VgzxsK6NYCvFuiW8YeDZ9p3e-cNQrGTxrV6405km0F~pfKKyu09E-~-zAM19SHFmVWbT-XLEpfo9krLQu7416VMRPsPfLISzRU2wAptW2N6CuQmOEp8telmilhUiEqYTC7GBzmo-XNIKs9XSs3-6wSLkZ0haRJtS7YDdmjrDE-UXhu55E7VhUnm7TAxSH4bdUtM5tDUU7Q8lq~t7y9qSu0EWwvMBnkR9-p8SlcVBr-Zy4aG10OUcvnA07TA~Y8OKtEzJZ1PvzuZVIKerAQXuLK~SMB-9Xni7v7pbGW6Lok6dG9KJjrLORTpURAFgC~mux096li0VcotR6eMfBwGH9zLWgBruknix3TCrmUoafzCP4vyuDrugN8-0k54HZemGb601~9PJQYwyP-V4b33LJdq1d-tKDvgFqR3mm-YrEgZbUQ22BSsI3Jx~tWc3hft8KHkfF3zV6Op4TB04XeQ-WupAAAA
beyond.i2p=WNtBKmOowfzhsCX7dWwddaKVZttpm6aSYDiXzExh2ABPS7Ts9WBZxkqWS8CdEHq6ZSOUGzT6kyQYRGmF9~uTZ~nkI-UG8Hqh6uibT0H6-NOR0k2Qjp5y49VB9VRHJ6I2Vxg1uo09edWYKUatMgl7GESInmhZPDF4NOAq1SD6TQpLfAvVxPWglISZWg2Br3K-u9cHqH3Q0fvyy-Cka9ALZiBkXlZ5vccy5B2KE2JXdI1fAn1TLx8dKuR11Xsd-aNjPofis1Ak-cTia9YjKYSiO4v0A7uguQwBZCmTwI-uXLzUeOZjg69NuIhFUi2hN3tuNvCqAg73ct0lAMnfZ1d3oT9BGW6Aqh4fMTXr8MPT0Qpq9te7KF7L5q-LJ-9JFnYVvjx0P-6YUe-mH89HwyL4nip-ET8jzgIRyYE-WNzc1Q5KOGcrQVZJl7Vv5lziCCMxYoSKqiThps1vv-lC1uEyPbMYANkJVJdx9bnTqlQ8r9rlmFFQEmBCojc7LeRZpslTAAAA
marcos.i2p=oN0aQlEygTHH1M2zLdkg1S8TcBaUIO4LQHC78R45ZImUCyOXVGzETk2AV6OD7LzLuzUOQSSfoHyVdctYMqq0buqdB6lzv1zKeNgofTHLvITEUJM5xemIz9vE91BIURjBpRVuonCGEQSBH~xqAsixv4OGtVyts5Nvle~vM~apZCBzEGEtWVikCyHSWL5MCBArbaEtYtCpUN-D2I628pb3JhbQppS3HCSa7b3tMFIIV-bqEjm1IIhvvcb5yyix78XKw~KAuE7KYvawXDOuJ0gtH6Hdox-UHMAWNP2~55oz-PPbchj3KvtL0yhI~6MJYKSpQEpoOsDpVDwmcnyU3V0NsEnab2Jjo0Cir7fmFIUjn9EWFjWk3yL5vd6qLxd2fjfO0Azqzb0uHp4wnP2sz25MPOpQMk-dNoX7DYI7LC9A3Ozu3yTisE62LVi8IRFB3p2O34DYN-flxR8ytn1B492x~0Ka80gd~HdiNo2rO4Qc04PILXe98NQj46kki8poho1DAAAA
ragnarok.i2p=mkRjUNekHYH3tpLDlt-qMSS5-SPPjLpvaIuCYOrNRM0ehuH93H74WuldWrDqzr77hLS3~JGD7qKVjaz9YDTDhK3dGV6K6xuXomJbShnYIQ0b-S0SZrhLXC8KIcp~DLQAW6rmoiIsyr99nZ1RPyVCOI~futZ9UnmZRsWbMu3z3dZfh7wGLeoAQAmnXW3IMxE7c0NFul6ojLGZkbK5Wrh1~gRESfB3TSF6TurQMaoL2Rf~ehVWFg6jtpDJ1UruwUWzyMapVqbe8Rftg9bXq6UlrBq61xuseNw20L1B-S0Gif2sC6DnhLT3k4MLnjVpB6OySQsuMoA67cnuLm8CZ~v9tH5~YlqU-hRH3yd-AFiMY4PkepI-Gjh--tr4NSNGsAlqM2XOf4z5XsdqQeT4GhWqXHjzb1nZcKmfumb69vOhWX9DuyVOq~wfY-RIuxlU08K0wV4IqmewK3TCIjIWTzpcxFwsZAGKx2QxMF8QXsQz1qTDSlzvX6hrnf1NXYXBPDVhAAAA
nano.i2p=oeXpaAMmVruU-7boMLZvnnLWvPnl-~WqLJKiJOigwiAs4c9umoCEo1gnN7IIr0XoloC4JmTZjsRfD-LE4kKKLKYMpw4hfGEQ74y~8fnf0Xe4gcG26l5O5RvKStZdK1pYzYioVfCgQk4a-RZfBQSd5l2mSrSHvpqzBmHrW9GZt56OymRELLHxfXn15AJXIRPdTtuiroXcKPMztZyvOsOWX47bkCcMfYTMJIrlYrlomh8uG4e45Kb8nX3uehy-kwPGFxMyJWMLs5ra7K~WzCzf6hbcdQ5vJVmYRj122WEjmtAl-gEKcK0XiLJMLF9y4aoIJGVHeA71a7mfKvSBEHjJ5BubC2IRoU5ZKxAkHtkOfoyDZ7ZxIB1r8~jAE2aPJzotQTN3l2gpcOFFcEapxXXWvFmIPjY0veorUGTjkXoGaLNl0-6JrH~w~5q2-bIt1mbWMySqX94YQmVTFYejArVS99NbdOIjb3VQoXKVXzjyu6Vr9BcfyYK9jMuqiztb-UfLAAAA
irc.orz.i2p=0oLxoY4VcI~e~OnFdHtYp9aghBcBLBK46BE-yffvaxsnsNgmf5xiRDORVJ0xjg6e8LZ-gTUXr5EHiJnJa5GEv3sris0PWEXW0QLV2WbkIOI3XNR8sa1WMAJYFX9wYSHNdGDuHj0Igcd9rxwaBufdcHeJ0WLvp1FXCMxEf9OTh52wL3ku5pPGlXaVp22RhJXqaKqGeIFh0A9t-gSF5cPB7D3jvoXPJlZtPFLkrsVQeDCpFC9TLaI2sebJjuEHaBhCLvjA8usG8WiZ0m-TVNwkg0XV~yy-rpOEGq-8CCLa1-7c6mc0gMmeolEFz9L9x6hXzw4TR0MXNMeu8nqeMhbvqws39iLiz1L2r6amzQrXjHReX1ibrEprNfLY~Ne1cgqQ5VsHpCr-UJv3D1QoFCJUwG7z0dJrgoi37aPRRjzUF-z67hi3z-ghKrY7d3m0YpTHUCwZ5o2uwUFM7xEvCXY4nQ8ndwEI0xJKfbBgWEmR8OxV3jbn9ZhSYSoygsbUQg55AAAA
utansans.i2p=87krkpFeYipyBHntsD2t-2A~pMW2jJG~3n2ZsvyOFeajYMAfr0s1G1EMxu~ZfywgFmWNir5pLlaXmAgd-IerlUnRF0FJrqqDlZVllYnAp-IZbGqg3PygDhSgYkuTh67l5wQEQTqwPMoz04F9O4w2GfzU-316xp634yWIYiFzeadHlnMVj-dsgM~namEo5dGmMWBpzk9iFkseTOtPJPC8e~TFrOhCPpPIbRPk6YkaVGJ0IyQ89bUmrRCH8ZiJuziY5i8S6YmGCuUIi5q9dLyu9~c5ucRQn13Ja9abWDXKgPF4MrErT4P5ATL-iQ~a00YF-YSXbSB~~0g3288QQwAN83oOKwSobL3vMXfmWyB7oA8zrjT8a4R4~tN3KXqbPTvkc-fZ-XDSaR57NssXLrz1DvJwhlQc2wcPAepFLX9c1IwgfF5coBwtZaq16gamvprRH9GCu4iWbjqhnQnaTiB~P1VqXRPGv5xI0JgcIgW0Q1H8vB9y2IQ~dW1gbGIiYdo9AAAA
blueheron.i2p=ZQkG-vHKcOcfg314mNZ9iNTP9Zl8W9H2gZzEPx9VesuIkeYRFQD1JbPvxWq5DBUfOzRb36c5pJeFuWWh35SViwmoJ~npcrqMWnfTtCq~JBYKCVYhpDZ-oo1Vg63GRwZ0hwaMv~l5jPWISdg~a5as-Cg69od~s86pdx9ci2GqSgINcyRMeBkWG2NEAUZSyhiMjSgW9lMVwA-5aXIiGlOaiKv4ldYZxwS9hpO0NLgWIIrQ7o2Nr5OL-Kh~FrShgSQlMSd0QwMFS0~QGKmcL7AmyIme3szLA9oN1bnKJ8ztRlTx3dzCI7nH9WXKmLWqvQck0PdRIT581rYw7qfU6uwN6UduS8enWnSXj5q47T6KzfWYBQu7AXzaatQGdMB6ct51Urfl56bRLT6Bst09Z2mvyjELlgQjGrVNoQlmRJ-e~y2kUmc-OYl0j~3g~EEiXnxmYXO3M6h54d60QcJgY7ccrWmnq7ayi~fBXXshHtZD~6w-cgQ4S5gBnpaEVG1Nt9xuAAAA

View File

@@ -1 +1 @@
<?xml version="1.0" encoding="iso-8859-1" standalone="yes" ?>
<?xml version="1.0" encoding="iso-8859-1" standalone="yes" ?>

View File

@@ -302,6 +302,19 @@ public class JobQueue {
}
return when;
}
/**
* When did the most recently completed job end?
*/
public long getLastJobEnd() {
long when = -1;
// not synchronized, so might b0rk if the runners are changed
for (Iterator iter = _queueRunners.values().iterator(); iter.hasNext(); ) {
long cur = ((JobQueueRunner)iter.next()).getLastEnd();
if (cur > when)
when = cur;
}
return when;
}
/**
* retrieve the most recently begun and still currently active job, or null if
* no jobs are running
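In other words, getLastJobEnd() is just a maximum over the per-runner end timestamps, with -1 meaning no job has finished yet (note the assignment direction fixed above). A minimal standalone sketch of that aggregation, using a plain list of longs in place of the _queueRunners map purely for illustration:

    import java.util.Arrays;
    import java.util.List;

    public class LastJobEndSketch {
        /** Latest end time across runners, or -1 if no job has finished yet. */
        static long lastJobEnd(List<Long> runnerEndTimes) {
            long when = -1;
            for (long cur : runnerEndTimes) {
                if (cur > when)
                    when = cur; // keep the running maximum
            }
            return when;
        }

        public static void main(String[] args) {
            System.out.println(lastJobEnd(Arrays.asList(100L, 250L, 175L))); // prints 250
        }
    }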

View File

@@ -12,6 +12,7 @@ class JobQueueRunner implements Runnable {
private Job _currentJob;
private Job _lastJob;
private long _lastBegin;
private long _lastEnd;
public JobQueueRunner(RouterContext context, int id) {
_context = context;
@@ -33,6 +34,7 @@ class JobQueueRunner implements Runnable {
public void stopRunning() { _keepRunning = false; }
public void startRunning() { _keepRunning = true; }
public long getLastBegin() { return _lastBegin; }
public long getLastEnd() { return _lastEnd; }
public void run() {
long lastActive = _context.clock().now();
long jobNum = 0;
@@ -88,6 +90,7 @@ class JobQueueRunner implements Runnable {
lastActive = _context.clock().now();
_lastJob = _currentJob;
_currentJob = null;
_lastEnd = lastActive;
jobNum++;
//if ( (jobNum % 10) == 0)
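The point of tracking both timestamps is that _lastBegin is stamped before a job runs and _lastEnd only after it returns, so an observer can tell a job is still in flight whenever the last begin is newer than the last end, and for how long. A rough sketch of that bookkeeping with the runner reduced to a couple of static fields (hypothetical names, not the real class):

    public class RunnerBookkeepingSketch {
        static volatile long lastBegin = -1;
        static volatile long lastEnd = -1;

        static void runJob(Runnable job) {
            lastBegin = System.currentTimeMillis();
            job.run();
            lastEnd = System.currentTimeMillis(); // only set once the job has returned
        }

        /** A job that began but has not yet ended is still running. */
        static boolean jobInFlight() {
            return lastBegin > lastEnd;
        }

        public static void main(String[] args) {
            runJob(() -> System.out.println("quick job"));
            System.out.println("still in flight? " + jobInFlight()); // false once it returned
        }
    }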

View File

@@ -41,6 +41,8 @@ class RouterThrottleImpl implements RouterThrottle {
_context.statManager().createRateStat("router.throttleTunnelProcessingTime1m", "How long it takes to process a message (1 minute average) when we throttle a tunnel?", "Throttle", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("router.throttleTunnelProcessingTime10m", "How long it takes to process a message (10 minute average) when we throttle a tunnel?", "Throttle", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("router.throttleTunnelMaxExceeded", "How many tunnels we are participating in when we refuse one due to excess?", "Throttle", new long[] { 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("router.throttleTunnelProbTooFast", "How many tunnels beyond the previous 1h average are we participating in when we throttle?", "Throttle", new long[] { 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("router.throttleTunnelProbTestSlow", "How slow are our tunnel tests when our average exceeds the old average and we throttle?", "Throttle", new long[] { 10*60*1000, 60*60*1000, 24*60*60*1000 });
}
public boolean acceptNetworkMessage() {
@@ -128,6 +130,67 @@ class RouterThrottleImpl implements RouterThrottle {
return false;
}
if (numTunnels > getMinThrottleTunnels()) {
double growthFactor = getTunnelGrowthFactor();
Rate avgTunnels = _context.statManager().getRate("tunnel.participatingTunnels").getRate(60*60*1000);
if (avgTunnels != null) {
double avg = 0;
if (avgTunnels.getLastEventCount() > 0)
avg = avgTunnels.getAverageValue();
else
avg = avgTunnels.getLifetimeAverageValue();
if ( (avg > 0) && (avg*growthFactor < numTunnels) ) {
// we're accelerating, let's try not to take on too much too fast
double probAccept = (avg*growthFactor) / numTunnels;
int v = _context.random().nextInt(100);
if (v < probAccept*100) {
// ok
if (_log.shouldLog(Log.INFO))
_log.info("Probabalistically accept tunnel request (p=" + probAccept
+ " v=" + v + " avg=" + avg + " current=" + numTunnels + ")");
} else {
if (_log.shouldLog(Log.WARN))
_log.warn("Probabalistically refusing tunnel request (avg=" + avg
+ " current=" + numTunnels + ")");
_context.statManager().addRateData("router.throttleTunnelProbTooFast", (long)(numTunnels-avg), 0);
return false;
}
} else {
if (_log.shouldLog(Log.INFO))
_log.info("Accepting tunnel request, since the average is " + avg
+ " and we only have " + numTunnels + ")");
}
}
Rate tunnelTestTime10m = _context.statManager().getRate("tunnel.testSuccessTime").getRate(10*60*1000);
Rate tunnelTestTime60m = _context.statManager().getRate("tunnel.testSuccessTime").getRate(60*60*1000);
if ( (tunnelTestTime10m != null) && (tunnelTestTime60m != null) && (tunnelTestTime10m.getLastEventCount() > 0) ) {
double avg10m = tunnelTestTime10m.getAverageValue();
double avg60m = 0;
if (tunnelTestTime60m.getLastEventCount() > 0)
avg60m = tunnelTestTime60m.getAverageValue();
else
avg60m = tunnelTestTime60m.getLifetimeAverageValue();
if ( (avg60m > 0) && (avg10m > avg60m * growthFactor) ) {
double probAccept = (avg60m*growthFactor)/avg10m;
int v = _context.random().nextInt(100);
if (v < probAccept*100) {
// ok
if (_log.shouldLog(Log.INFO))
_log.info("Probabalistically accept tunnel request (p=" + probAccept
+ " v=" + v + " test time avg 10m=" + avg10m + " 60m=" + avg60m + ")");
} else {
if (_log.shouldLog(Log.WARN))
_log.warn("Probabalistically refusing tunnel request (test time avg 10m=" + avg10m
+ " 60m=" + avg60m + ")");
_context.statManager().addRateData("router.throttleTunnelProbTestSlow", (long)(avg10m-avg60m), 0);
return false;
}
}
}
}
String maxTunnels = _context.getProperty(PROP_MAX_TUNNELS);
if (maxTunnels != null) {
try {
@@ -154,6 +217,22 @@ class RouterThrottleImpl implements RouterThrottle {
return true;
}
/** don't ever probabalistically throttle tunnels if we have fewer than this many */
private int getMinThrottleTunnels() {
try {
return Integer.parseInt(_context.getProperty("router.minThrottleTunnels", "40"));
} catch (NumberFormatException nfe) {
return 40;
}
}
private double getTunnelGrowthFactor() {
try {
return Double.parseDouble(_context.getProperty("router.tunnelGrowthFactor", "1.5"));
} catch (NumberFormatException nfe) {
return 1.5;
}
}
public long getMessageDelay() {
Rate delayRate = _context.statManager().getRate("transport.sendProcessingTime").getRate(60*1000);
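The shape of the new throttle is: once the participating-tunnel count exceeds the hourly average scaled by router.tunnelGrowthFactor, accept a request only with probability (avg * growthFactor) / numTunnels, and the same accept/refuse pattern is reused when the 10-minute tunnel test time exceeds the 60-minute average. A self-contained sketch of just that probability decision, with the Rate plumbing stripped out and made-up numbers in main():

    import java.util.Random;

    public class TunnelThrottleSketch {
        private static final Random RANDOM = new Random();

        /** Accept outright while below avg*growthFactor, otherwise accept probabilistically. */
        static boolean acceptTunnel(double hourlyAvg, int currentTunnels, double growthFactor) {
            if (hourlyAvg <= 0 || currentTunnels <= hourlyAvg * growthFactor)
                return true; // not growing too fast
            double probAccept = (hourlyAvg * growthFactor) / currentTunnels;
            return RANDOM.nextInt(100) < probAccept * 100;
        }

        public static void main(String[] args) {
            // averaging 100 participating tunnels, currently at 200, growth factor 1.5
            // => accept with probability 150/200 = 0.75
            System.out.println(acceptTunnel(100, 200, 1.5));
        }
    }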

View File

@@ -15,8 +15,8 @@ import net.i2p.CoreVersion;
*
*/
public class RouterVersion {
public final static String ID = "$Revision: 1.50 $ $Date: 2004/10/09 19:03:27 $";
public final static String VERSION = "0.4.1.2";
public final static String ID = "$Revision: 1.59 $ $Date: 2004/10/17 16:03:05 $";
public final static String VERSION = "0.4.1.3";
public final static long BUILD = 0;
public static void main(String args[]) {
System.out.println("I2P Router version: " + VERSION);

View File

@@ -1,6 +1,8 @@
package net.i2p.router;
import net.i2p.data.DataHelper;
import net.i2p.stat.Rate;
import net.i2p.stat.RateStat;
import net.i2p.util.Log;
/**
@@ -45,7 +47,38 @@ class RouterWatchdog implements Runnable {
}
private boolean shutdownOnHang() {
return true;
return Boolean.valueOf(_context.getProperty("watchdog.haltOnHang", "false")).booleanValue();
}
private void dumpStatus() {
if (_log.shouldLog(Log.ERROR)) {
Job cur = _context.jobQueue().getLastJob();
if (cur != null)
_log.error("Most recent job: " + cur);
_log.error("Last job began: "
+ DataHelper.formatDuration(_context.clock().now()-_context.jobQueue().getLastJobBegin())
+ " ago");
_log.error("Last job ended: "
+ DataHelper.formatDuration(_context.clock().now()-_context.jobQueue().getLastJobEnd())
+ " ago");
_log.error("Ready and waiting jobs: " + _context.jobQueue().getReadyCount());
_log.error("Job lag: " + _context.jobQueue().getMaxLag());
_log.error("Participating tunnel count: " + _context.tunnelManager().getParticipatingCount());
RateStat rs = _context.statManager().getRate("transport.sendProcessingTime");
Rate r = null;
if (rs != null)
r = rs.getRate(60*1000);
double processTime = (r != null ? r.getAverageValue() : 0);
_log.error("1minute send processing time: " + processTime);
rs = _context.statManager().getRate("bw.sendBps");
r = null;
if (rs != null)
r = rs.getRate(60*1000);
double kbps = (r != null ? r.getAverageValue() : 0);
_log.error("Outbound send rate: " + kbps + "KBps");
}
}
public void run() {
@@ -59,9 +92,14 @@ class RouterWatchdog implements Runnable {
boolean ok = verifyJobQueueLiveliness();
ok = ok && verifyClientLiveliness();
if (!ok && shutdownOnHang()) {
_log.log(Log.CRIT, "Router hung! hard restart!");
System.exit(Router.EXIT_HARD_RESTART);
if (!ok) {
dumpStatus();
if (shutdownOnHang()) {
_log.log(Log.CRIT, "Router hung! hard restart!");
try { Thread.sleep(30*1000); } catch (InterruptedException ie) {}
// halt and not system.exit, since some of the shutdown hooks might be misbehaving
Runtime.getRuntime().halt(Router.EXIT_HARD_RESTART);
}
}
}
}
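When the liveliness checks fail, the watchdog now dumps diagnostics first and only restarts if watchdog.haltOnHang allows it, using Runtime.getRuntime().halt() after a short sleep rather than System.exit(), because halt() skips the registered shutdown hooks, which may themselves be wedged. A small illustration of that difference (the slow hook and exit code below are made up):

    public class HaltVsExitSketch {
        public static void main(String[] args) {
            Runtime.getRuntime().addShutdownHook(new Thread(() -> {
                try { Thread.sleep(60_000); } catch (InterruptedException ignored) {}
                System.out.println("slow shutdown hook finished");
            }));
            // System.exit(1) would wait for the hook above before the JVM dies;
            // halt(1) terminates immediately and never runs any shutdown hooks.
            Runtime.getRuntime().halt(1);
        }
    }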

View File

@@ -102,30 +102,30 @@ public class StatisticsManager implements Service {
stats.putAll(_context.profileManager().summarizePeers(_publishedStats));
includeThroughput(stats);
includeRate("transport.sendProcessingTime", stats, new long[] { 60*1000, 60*60*1000 });
includeRate("transport.sendProcessingTime", stats, new long[] { 60*60*1000 });
//includeRate("tcp.queueSize", stats);
includeRate("jobQueue.jobLag", stats, new long[] { 60*1000, 60*60*1000 });
includeRate("jobQueue.jobRun", stats, new long[] { 60*1000, 60*60*1000 });
includeRate("crypto.elGamal.encrypt", stats, new long[] { 60*1000, 60*60*1000 });
//includeRate("jobQueue.jobLag", stats, new long[] { 60*1000, 60*60*1000 });
//includeRate("jobQueue.jobRun", stats, new long[] { 60*1000, 60*60*1000 });
includeRate("crypto.elGamal.encrypt", stats, new long[] { 60*60*1000 });
//includeRate("crypto.garlic.decryptFail", stats, new long[] { 60*60*1000, 24*60*60*1000 });
includeRate("tunnel.unknownTunnelTimeLeft", stats, new long[] { 60*60*1000, 24*60*60*1000 });
includeRate("jobQueue.readyJobs", stats, new long[] { 60*1000, 60*60*1000 });
includeRate("tunnel.unknownTunnelTimeLeft", stats, new long[] { 60*60*1000 });
//includeRate("jobQueue.readyJobs", stats, new long[] { 60*60*1000 });
//includeRate("jobQueue.droppedJobs", stats, new long[] { 60*60*1000, 24*60*60*1000 });
includeRate("inNetPool.dropped", stats, new long[] { 60*60*1000, 24*60*60*1000 });
//includeRate("inNetPool.dropped", stats, new long[] { 60*60*1000, 24*60*60*1000 });
includeRate("tunnel.participatingTunnels", stats, new long[] { 5*60*1000, 60*60*1000 });
includeRate("tunnel.testSuccessTime", stats, new long[] { 60*60*1000l, 24*60*60*1000l });
//includeRate("tunnel.outboundMessagesProcessed", stats, new long[] { 10*60*1000, 60*60*1000 });
//includeRate("tunnel.inboundMessagesProcessed", stats, new long[] { 10*60*1000, 60*60*1000 });
includeRate("tunnel.participatingMessagesProcessed", stats, new long[] { 10*60*1000, 60*60*1000 });
includeRate("tunnel.participatingMessagesProcessedActive", stats, new long[] { 10*60*1000, 60*60*1000 });
//includeRate("tunnel.participatingMessagesProcessed", stats, new long[] { 10*60*1000, 60*60*1000 });
//includeRate("tunnel.participatingMessagesProcessedActive", stats, new long[] { 10*60*1000, 60*60*1000 });
//includeRate("tunnel.expiredAfterAcceptTime", stats, new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
includeRate("tunnel.bytesAllocatedAtAccept", stats, new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
includeRate("netDb.lookupsReceived", stats, new long[] { 5*60*1000, 60*60*1000 });
includeRate("netDb.lookupsHandled", stats, new long[] { 5*60*1000, 60*60*1000 });
includeRate("netDb.lookupsMatched", stats, new long[] { 5*60*1000, 60*60*1000 });
includeRate("tunnel.bytesAllocatedAtAccept", stats, new long[] { 60*60*1000l });
includeRate("netDb.lookupsReceived", stats, new long[] { 60*60*1000 });
//includeRate("netDb.lookupsHandled", stats, new long[] { 60*60*1000 });
includeRate("netDb.lookupsMatched", stats, new long[] { 60*60*1000 });
//includeRate("netDb.storeSent", stats, new long[] { 5*60*1000, 60*60*1000 });
includeRate("netDb.successPeers", stats, new long[] { 60*60*1000 });
includeRate("netDb.failedPeers", stats, new long[] { 60*60*1000 });
//includeRate("netDb.failedPeers", stats, new long[] { 60*60*1000 });
//includeRate("router.throttleNetDbDoSSend", stats, new long[] { 10*60*1000, 60*60*1000, 24*60*60*1000 });
//includeRate("router.throttleNetDbDoS", stats, new long[] { 10*60*1000, 60*60*1000 });
//includeRate("netDb.searchCount", stats, new long[] { 3*60*60*1000});
@@ -141,11 +141,11 @@ public class StatisticsManager implements Service {
//includeRate("transport.receiveMessageSmall", stats, new long[] { 5*60*1000, 60*60*1000 });
//includeRate("transport.receiveMessageMedium", stats, new long[] { 5*60*1000, 60*60*1000 });
//includeRate("transport.receiveMessageLarge", stats, new long[] { 5*60*1000, 60*60*1000 });
includeRate("client.sendAckTime", stats, new long[] { 60*60*1000, 24*60*60*1000l }, true);
includeRate("client.sendsPerFailure", stats, new long[] { 60*60*1000, 24*60*60*1000l }, true);
includeRate("client.timeoutCongestionTunnel", stats, new long[] { 60*60*1000, 24*60*60*1000l }, true);
includeRate("client.timeoutCongestionMessage", stats, new long[] { 60*60*1000, 24*60*60*1000l }, true);
includeRate("client.timeoutCongestionInbound", stats, new long[] { 60*60*1000, 24*60*60*1000l }, true);
includeRate("client.sendAckTime", stats, new long[] { 60*60*1000 }, true);
//includeRate("client.sendsPerFailure", stats, new long[] { 60*60*1000, 24*60*60*1000l }, true);
//includeRate("client.timeoutCongestionTunnel", stats, new long[] { 60*60*1000, 24*60*60*1000l }, true);
//includeRate("client.timeoutCongestionMessage", stats, new long[] { 60*60*1000, 24*60*60*1000l }, true);
//includeRate("client.timeoutCongestionInbound", stats, new long[] { 60*60*1000, 24*60*60*1000l }, true);
stats.setProperty("stat_uptime", DataHelper.formatDuration(_context.router().getUptime()));
stats.setProperty("stat__rateKey", "avg;maxAvg;pctLifetime;[sat;satLim;maxSat;maxSatLim;][num;lifetimeFreq;maxFreq]");
_log.debug("Publishing peer rankings");
@@ -257,12 +257,12 @@ public class StatisticsManager implements Service {
String throughputRate = renderThroughput(sendBytes5m, 5*60*1000);
stats.setProperty("stat_bandwidthSendBps.5m", throughputRate);
throughputRate = renderThroughput(sendBytes60m, 60*60*1000);
stats.setProperty("stat_bandwidthSendBps.60m", throughputRate);
//throughputRate = renderThroughput(sendBytes60m, 60*60*1000);
//stats.setProperty("stat_bandwidthSendBps.60m", throughputRate);
throughputRate = renderThroughput(recvBytes5m, 5*60*1000);
stats.setProperty("stat_bandwidthReceiveBps.5m", throughputRate);
throughputRate = renderThroughput(recvBytes60m, 60*60*1000);
stats.setProperty("stat_bandwidthReceiveBps.60m", throughputRate);
//throughputRate = renderThroughput(recvBytes60m, 60*60*1000);
//stats.setProperty("stat_bandwidthReceiveBps.60m", throughputRate);
}

View File

@@ -328,12 +328,12 @@ public class TunnelInfo extends DataStructureImpl {
int i = 0;
while (cur != null) {
buf.append("\n*Hop ").append(i).append(": ").append(cur.getThisHop());
if (cur.getEncryptionKey() != null)
buf.append("\n Encryption key: ").append(cur.getEncryptionKey());
if (cur.getSigningKey() != null)
buf.append("\n Signing key: ").append(cur.getSigningKey());
if (cur.getVerificationKey() != null)
buf.append("\n Verification key: ").append(cur.getVerificationKey());
//if (cur.getEncryptionKey() != null)
// buf.append("\n Encryption key: ").append(cur.getEncryptionKey());
//if (cur.getSigningKey() != null)
// buf.append("\n Signing key: ").append(cur.getSigningKey());
//if (cur.getVerificationKey() != null)
// buf.append("\n Verification key: ").append(cur.getVerificationKey());
if (cur.getDestination() != null)
buf.append("\n Destination: ").append(cur.getDestination().calculateHash().toBase64());
if (cur.getNextHop() != null)
@@ -344,7 +344,7 @@ public class TunnelInfo extends DataStructureImpl {
buf.append("\n Expiration: ").append("none");
else
buf.append("\n Expiration: ").append(new Date(cur.getSettings().getExpiration()));
buf.append("\n Ready: ").append(getIsReady());
//buf.append("\n Ready: ").append(getIsReady());
cur = cur.getNextHopInfo();
i++;
}

View File

@@ -72,6 +72,7 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade {
private static final long MAX_TIME_TO_REBUILD = 5*60*1000;
public boolean verifyClientLiveliness() {
if (_manager == null) return true;
boolean lively = true;
for (Iterator iter = _manager.getRunnerDestinations().iterator(); iter.hasNext(); ) {
Destination dest = (Destination)iter.next();

View File

@@ -54,6 +54,7 @@ public class SendTunnelMessageJob extends JobImpl {
private long _timeout;
private long _expiration;
private int _priority;
private int _state;
public SendTunnelMessageJob(RouterContext ctx, I2NPMessage msg, TunnelId tunnelId, Job onSend, ReplyJob onReply, Job onFailure, MessageSelector selector, long timeoutMs, int priority) {
this(ctx, msg, tunnelId, null, null, onSend, onReply, onFailure, selector, timeoutMs, priority);
@@ -61,11 +62,14 @@ public class SendTunnelMessageJob extends JobImpl {
public SendTunnelMessageJob(RouterContext ctx, I2NPMessage msg, TunnelId tunnelId, Hash targetRouter, TunnelId targetTunnelId, Job onSend, ReplyJob onReply, Job onFailure, MessageSelector selector, long timeoutMs, int priority) {
super(ctx);
_state = 0;
_log = ctx.logManager().getLog(SendTunnelMessageJob.class);
if (msg == null)
throw new IllegalArgumentException("wtf, null message? sod off");
if (tunnelId == null)
throw new IllegalArgumentException("wtf, no tunnelId? nuh uh");
_state = 1;
_message = msg;
_destRouter = targetRouter;
_tunnelId = tunnelId;
@@ -92,9 +96,11 @@ public class SendTunnelMessageJob extends JobImpl {
} else {
_expiration = getContext().clock().now() + timeoutMs;
}
_state = 2;
}
public void runJob() {
_state = 3;
TunnelInfo info = getContext().tunnelManager().getTunnelInfo(_tunnelId);
if (info == null) {
if (_log.shouldLog(Log.DEBUG))
@@ -108,7 +114,9 @@ public class SendTunnelMessageJob extends JobImpl {
getContext().jobQueue().addJob(_onFailure);
return;
} else {
_state = 4;
forwardToGateway();
_state = 0;
return;
}
}
@@ -118,13 +126,19 @@ public class SendTunnelMessageJob extends JobImpl {
if (isEndpoint(info)) {
if (_log.shouldLog(Log.INFO))
_log.info("Tunnel message where we're both the gateway and the endpoint - honor instructions");
_state = 5;
honorInstructions(info);
_state = 0;
return;
} else if (isGateway(info)) {
_state = 6;
handleAsGateway(info);
_state = 0;
return;
} else {
_state = 7;
handleAsParticipant(info);
_state = 0;
return;
}
}
@@ -134,6 +148,7 @@ public class SendTunnelMessageJob extends JobImpl {
*
*/
private void forwardToGateway() {
_state = 8;
TunnelMessage msg = new TunnelMessage(getContext());
msg.setData(_message.toByteArray());
msg.setTunnelId(_tunnelId);
@@ -148,6 +163,7 @@ public class SendTunnelMessageJob extends JobImpl {
String bodyType = _message.getClass().getName();
getContext().messageHistory().wrap(bodyType, _message.getUniqueId(),
TunnelMessage.class.getName(), msg.getUniqueId());
_state = 9;
return;
}
@@ -157,6 +173,7 @@ public class SendTunnelMessageJob extends JobImpl {
*
*/
private void handleAsGateway(TunnelInfo info) {
_state = 10;
// since we are the gateway, we don't need to verify the data structures
TunnelInfo us = getUs(info);
if (us == null) {
@@ -164,7 +181,9 @@ public class SendTunnelMessageJob extends JobImpl {
_log.error("We are not participating in this /known/ tunnel - was the router reset?");
if (_onFailure != null)
getContext().jobQueue().addJob(_onFailure);
_state = 11;
} else {
_state = 12;
// we're the gateway, so sign, encrypt, and forward to info.getNextHop()
TunnelMessage msg = prepareMessage(info);
if (msg == null) {
@@ -172,6 +191,7 @@ public class SendTunnelMessageJob extends JobImpl {
_log.error("wtf, unable to prepare a tunnel message to the next hop, when we're the gateway and hops remain? tunnel: " + info);
if (_onFailure != null)
getContext().jobQueue().addJob(_onFailure);
_state = 13;
return;
}
if (_log.shouldLog(Log.DEBUG))
@@ -185,6 +205,7 @@ public class SendTunnelMessageJob extends JobImpl {
+ (now - _expiration) + "ms ago)");
if (_onFailure != null)
getContext().jobQueue().addJob(_onFailure);
_state = 14;
return;
} else if (_expiration < now + 15*1000) {
if (_log.shouldLog(Log.WARN))
@@ -198,6 +219,7 @@ public class SendTunnelMessageJob extends JobImpl {
_selector,
(int)(_expiration - getContext().clock().now()),
_priority));
_state = 15;
}
}
@@ -207,6 +229,7 @@ public class SendTunnelMessageJob extends JobImpl {
*
*/
private void handleAsParticipant(TunnelInfo info) {
_state = 16;
// SendTunnelMessageJob shouldn't be used for participants!
if (_log.shouldLog(Log.DEBUG))
_log.debug("SendTunnelMessageJob for a participant... ", getAddedBy());
@@ -216,6 +239,7 @@ public class SendTunnelMessageJob extends JobImpl {
_log.error("Cannot inject non-tunnel messages as a participant!" + _message, getAddedBy());
if (_onFailure != null)
getContext().jobQueue().addJob(_onFailure);
_state = 17;
return;
}
@@ -227,18 +251,23 @@ public class SendTunnelMessageJob extends JobImpl {
_log.error("No verification key for the participant? tunnel: " + info, getAddedBy());
if (_onFailure != null)
getContext().jobQueue().addJob(_onFailure);
_state = 18;
return;
}
boolean ok = struct.verifySignature(getContext(), info.getVerificationKey().getKey());
_state = 19;
if (!ok) {
if (_log.shouldLog(Log.WARN))
_log.warn("Failed tunnel verification! Spoofing / tagging attack? " + _message, getAddedBy());
if (_onFailure != null)
getContext().jobQueue().addJob(_onFailure);
_state = 20;
return;
} else {
_state = 21;
if (info.getNextHop() != null) {
_state = 22;
if (_log.shouldLog(Log.INFO))
_log.info("Message for tunnel " + info.getTunnelId().getTunnelId() + " received where we're not the gateway and there are remaining hops, so forward it on to "
+ info.getNextHop().toBase64() + " via SendMessageDirectJob");
@@ -247,12 +276,15 @@ public class SendTunnelMessageJob extends JobImpl {
(int)(_message.getMessageExpiration().getTime() - getContext().clock().now()),
_priority);
getContext().jobQueue().addJob(j);
_state = 23;
return;
} else {
_state = 24;
if (_log.shouldLog(Log.ERROR))
_log.error("Should not be reached - participant, but no more hops?!");
if (_onFailure != null)
getContext().jobQueue().addJob(_onFailure);
_state = 25;
return;
}
}
@@ -261,19 +293,23 @@ public class SendTunnelMessageJob extends JobImpl {
/** find our place in the tunnel */
private TunnelInfo getUs(TunnelInfo info) {
_state = 26;
Hash us = getContext().routerHash();
TunnelInfo lastUs = null;
while (info != null) {
if (us.equals(info.getThisHop()))
lastUs = info;
info = info.getNextHopInfo();
_state = 28;
}
_state = 27;
return lastUs;
}
/** are we the endpoint for the tunnel? */
private boolean isEndpoint(TunnelInfo info) {
TunnelInfo us = getUs(info);
_state = 29;
if (us == null) return false;
return (us.getNextHop() == null);
}
@@ -281,6 +317,7 @@ public class SendTunnelMessageJob extends JobImpl {
/** are we the gateway for the tunnel? */
private boolean isGateway(TunnelInfo info) {
TunnelInfo us = getUs(info);
_state = 30;
if (us == null) return false;
return (us.getSigningKey() != null); // only the gateway can sign
}
@@ -294,6 +331,7 @@ public class SendTunnelMessageJob extends JobImpl {
*
*/
private TunnelMessage prepareMessage(TunnelInfo info) {
_state = 31;
TunnelMessage msg = new TunnelMessage(getContext());
SessionKey key = getContext().keyGenerator().generateSessionKey();
@@ -307,6 +345,7 @@ public class SendTunnelMessageJob extends JobImpl {
// but if we are, have the endpoint forward it appropriately.
// note that this algorithm does not currently support instructing the endpoint to send to a Destination
if (_destRouter != null) {
_state = 32;
instructions.setRouter(_destRouter);
if (_targetTunnelId != null) {
if (_log.shouldLog(Log.DEBUG))
@@ -320,6 +359,7 @@ public class SendTunnelMessageJob extends JobImpl {
instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_ROUTER);
}
} else {
_state = 33;
if (_message instanceof DataMessage) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Instructions are for local message delivery at the endpoint with a DataMessage to be sent to a Destination");
@@ -334,19 +374,26 @@ public class SendTunnelMessageJob extends JobImpl {
if (info == null) {
if (_log.shouldLog(Log.WARN))
_log.warn("Tunnel info is null to send message " + _message);
_state = 34;
return null;
} else if ( (info.getEncryptionKey() == null) || (info.getEncryptionKey().getKey() == null) ) {
if (_log.shouldLog(Log.WARN))
_log.warn("Tunnel encryption key is null when we're the gateway?! info: " + info);
_state = 35;
return null;
}
_state = 36;
byte encryptedInstructions[] = encrypt(instructions, info.getEncryptionKey().getKey(), INSTRUCTIONS_PADDING);
byte encryptedMessage[] = encrypt(_message, key, PAYLOAD_PADDING);
_state = 37;
TunnelVerificationStructure verification = createVerificationStructure(encryptedMessage, info);
_state = 38;
String bodyType = _message.getClass().getName();
getContext().messageHistory().wrap(bodyType, _message.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId());
_state = 39;
if (_log.shouldLog(Log.DEBUG))
_log.debug("Tunnel message prepared: instructions = " + instructions);
@@ -355,6 +402,7 @@ public class SendTunnelMessageJob extends JobImpl {
msg.setEncryptedDeliveryInstructions(encryptedInstructions);
msg.setTunnelId(_tunnelId);
msg.setVerificationStructure(verification);
_state = 40;
return msg;
}
@@ -363,9 +411,12 @@ public class SendTunnelMessageJob extends JobImpl {
*
*/
private TunnelVerificationStructure createVerificationStructure(byte encryptedMessage[], TunnelInfo info) {
_state = 41;
TunnelVerificationStructure struct = new TunnelVerificationStructure();
struct.setMessageHash(getContext().sha().calculateHash(encryptedMessage));
struct.sign(getContext(), info.getSigningKey().getKey());
_state = 42;
return struct;
}
@@ -375,6 +426,7 @@ public class SendTunnelMessageJob extends JobImpl {
* @param paddedSize minimum size to pad to
*/
private byte[] encrypt(DataStructure struct, SessionKey key, int paddedSize) {
_state = 43;
try {
ByteArrayOutputStream baos = new ByteArrayOutputStream(paddedSize);
byte data[] = struct.toByteArray();
@@ -383,11 +435,13 @@ public class SendTunnelMessageJob extends JobImpl {
byte iv[] = new byte[16];
Hash h = getContext().sha().calculateHash(key.getData());
System.arraycopy(h.getData(), 0, iv, 0, iv.length);
_state = 44;
return getContext().aes().safeEncrypt(baos.toByteArray(), key, iv, paddedSize);
} catch (IOException ioe) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error writing out data to encrypt", ioe);
}
_state = 45;
return null;
}
@@ -398,6 +452,7 @@ public class SendTunnelMessageJob extends JobImpl {
*
*/
private void honorInstructions(TunnelInfo info) {
_state = 46;
if (_selector != null)
createFakeOutNetMessage();
@@ -405,6 +460,7 @@ public class SendTunnelMessageJob extends JobImpl {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Firing onSend as we're honoring the instructions");
getContext().jobQueue().addJob(_onSend);
_state = 47;
}
// since we are the gateway, we don't need to decrypt the delivery instructions or the payload
@@ -412,9 +468,13 @@ public class SendTunnelMessageJob extends JobImpl {
RouterIdentity ident = getContext().router().getRouterInfo().getIdentity();
if (_destRouter != null) {
_state = 48;
honorSendRemote(info, ident);
_state = 49;
} else {
_state = 50;
honorSendLocal(info, ident);
_state = 51;
}
}
@@ -424,6 +484,7 @@ public class SendTunnelMessageJob extends JobImpl {
*
*/
private void honorSendRemote(TunnelInfo info, RouterIdentity ident) {
_state = 52;
I2NPMessage msg = null;
if (_targetTunnelId != null) {
if (_log.shouldLog(Log.DEBUG))
@@ -438,7 +499,9 @@ public class SendTunnelMessageJob extends JobImpl {
byte data[] = _message.toByteArray();
tmsg.setData(data);
msg = tmsg;
_state = 53;
} else {
_state = 54;
if (_log.shouldLog(Log.DEBUG))
_log.debug("Forward " + _message.getClass().getName()
+ " message off to remote router " + _destRouter.toBase64());
@@ -464,15 +527,18 @@ public class SendTunnelMessageJob extends JobImpl {
}
}
_state = 55;
String bodyType = _message.getClass().getName();
getContext().messageHistory().wrap(bodyType, _message.getUniqueId(),
TunnelMessage.class.getName(), msg.getUniqueId());
_state = 56;
// don't specify a selector, since createFakeOutNetMessage already does that
SendMessageDirectJob j = new SendMessageDirectJob(getContext(), msg, _destRouter,
_onSend, _onReply, _onFailure,
null, (int)(timeLeft),
_priority);
_state = 57;
getContext().jobQueue().addJob(j);
}
@@ -483,16 +549,20 @@ public class SendTunnelMessageJob extends JobImpl {
*
*/
private void honorSendLocal(TunnelInfo info, RouterIdentity ident) {
_state = 58;
if ( (info.getDestination() == null) || !(_message instanceof DataMessage) ) {
// it's a network message targeting us...
if (_log.shouldLog(Log.DEBUG))
_log.debug("Destination is null or its not a DataMessage - pass it off to the InNetMessagePool");
_state = 59;
InNetMessage msg = new InNetMessage(getContext());
msg.setFromRouter(ident);
msg.setFromRouterHash(ident.getHash());
msg.setMessage(_message);
getContext().inNetMessagePool().add(msg);
_state = 60;
} else {
_state = 61;
if (_log.shouldLog(Log.DEBUG))
_log.debug("Destination is not null and it is a DataMessage - pop it into the ClientMessagePool");
DataMessage msg = (DataMessage)_message;
@@ -502,6 +572,7 @@ public class SendTunnelMessageJob extends JobImpl {
_log.warn("Duplicate data message received [" + msg.getUniqueId() + " expiring on " + msg.getMessageExpiration() + "]");
getContext().messageHistory().droppedOtherMessage(msg);
getContext().messageHistory().messageProcessingError(msg.getUniqueId(), msg.getClass().getName(), "Duplicate");
_state = 62;
return;
}
@@ -518,10 +589,12 @@ public class SendTunnelMessageJob extends JobImpl {
clientMessage.setReceptionInfo(receptionInfo);
getContext().clientMessagePool().add(clientMessage);
getContext().messageHistory().receivePayloadMessage(msg.getUniqueId());
_state = 63;
}
}
private void createFakeOutNetMessage() {
_state = 64;
// now we create a fake outNetMessage to go onto the registry so we can select
if (_log.shouldLog(Log.DEBUG))
_log.debug("Registering a fake outNetMessage for the message tunneled locally since we have a selector");
@@ -538,7 +611,8 @@ public class SendTunnelMessageJob extends JobImpl {
getContext().messageRegistry().registerPending(outM);
// we don't really need the data
outM.discardData();
_state = 65;
}
public String getName() { return "Send Tunnel Message"; }
public String getName() { return "Send Tunnel Message" + (_state == 0 ? "" : ""+_state); }
}
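The _state field added throughout SendTunnelMessageJob is pure hang-hunting instrumentation: each branch stamps a distinct number, and getName() appends the current value, so a job that shows up stuck in the job queue or in a thread dump reveals roughly which code path it stopped in. A stripped-down sketch of the same idea (JobImpl and the queue are not modeled; the state values below are arbitrary):

    public class StateTaggedJobSketch implements Runnable {
        private volatile int state = 0;

        public String getName() {
            return "Send Tunnel Message" + (state == 0 ? "" : "" + state);
        }

        @Override
        public void run() {
            state = 3;              // entered runJob
            doForwardToGateway();
            state = 0;              // completed normally
        }

        private void doForwardToGateway() {
            state = 8;              // building the wrapped message
            // ... real work would happen here ...
            state = 9;              // wrapped and handed off
        }

        public static void main(String[] args) {
            StateTaggedJobSketch job = new StateTaggedJobSketch();
            job.run();
            System.out.println(job.getName()); // "Send Tunnel Message" once finished
        }
    }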

View File

@@ -87,8 +87,12 @@ public class CapacityCalculator extends Calculator {
long failed = 0;
if (curFailed != null)
failed = curFailed.getCurrentEventCount() + curFailed.getLastEventCount();
if (failed > 0)
val -= failed * stretch;
if (failed > 0) {
if ( (period == 10*60*1000) && (curFailed.getCurrentEventCount() > 0) )
return 0.0d; // their tunnels have failed in the last 0-10 minutes
else
val -= failed * stretch;
}
if ( (period == 10*60*1000) && (curRejected.getCurrentEventCount() + curRejected.getLastEventCount() > 0) )
return 0.0d;
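The change makes the capacity penalty much sharper: if any of the peer's tunnels failed within the current 10-minute period, the 10-minute capacity goes straight to zero instead of just being reduced by failed * stretch, so tunnel selection moves off that peer quickly. A toy version of the rule with the Rate objects replaced by plain counts (parameter names are mine, not the calculator's):

    public class CapacitySketch {
        static final long TEN_MINUTES = 10 * 60 * 1000;

        static double adjustForFailures(double val, long period,
                                        long failedCurrentPeriod, long failedLastPeriod,
                                        double stretch) {
            long failed = failedCurrentPeriod + failedLastPeriod;
            if (failed > 0) {
                if (period == TEN_MINUTES && failedCurrentPeriod > 0)
                    return 0.0d; // tunnels failed within the last 0-10 minutes: no capacity
                return val - failed * stretch;
            }
            return val;
        }

        public static void main(String[] args) {
            System.out.println(adjustForFailures(50.0, TEN_MINUTES, 1, 0, 5.0)); // 0.0
            System.out.println(adjustForFailures(50.0, TEN_MINUTES, 0, 2, 5.0)); // 40.0
        }
    }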

View File

@@ -46,13 +46,16 @@ public class IsFailingCalculator extends Calculator {
// return true;
//}
Rate rejectRate = profile.getTunnelHistory().getRejectionRate().getRate(10*60*1000);
if (rejectRate.getCurrentEventCount() >= 2) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Peer " + profile.getPeer().toBase64()
+ " is failing because they rejected some tunnels recently");
return true;
}
// this doesn't make sense with probabalistic rejections - we should be
// adequately dampening the capacity so these peers aren't queried
//Rate rejectRate = profile.getTunnelHistory().getRejectionRate().getRate(10*60*1000);
//if (rejectRate.getCurrentEventCount() >= 2) {
// if (_log.shouldLog(Log.DEBUG))
// _log.debug("Peer " + profile.getPeer().toBase64()
// + " is failing because they rejected some tunnels recently");
// return true;
//}
////
// the right way to behave would be to use some statistical

View File

@@ -64,25 +64,30 @@ public class TCPAddress {
return;
}
String host = addr.getOptions().getProperty(PROP_HOST);
try {
InetAddress iaddr = InetAddress.getByName(host);
_host = iaddr.getHostAddress();
_addr = iaddr;
String port = addr.getOptions().getProperty(PROP_PORT);
if ( (port != null) && (port.trim().length() > 0) ) {
try {
_port = Integer.parseInt(port);
} catch (NumberFormatException nfe) {
_log.error("Invalid port [" + port + "]", nfe);
_port = -1;
}
} else {
_port = -1;
}
} catch (UnknownHostException uhe) {
if (host == null) {
_host = null;
_port = -1;
} else {
try {
InetAddress iaddr = InetAddress.getByName(host.trim());
_host = iaddr.getHostAddress();
_addr = iaddr;
String port = addr.getOptions().getProperty(PROP_PORT);
if ( (port != null) && (port.trim().length() > 0) ) {
try {
_port = Integer.parseInt(port.trim());
} catch (NumberFormatException nfe) {
_log.error("Invalid port [" + port + "]", nfe);
_port = -1;
}
} else {
_port = -1;
}
} catch (UnknownHostException uhe) {
_host = null;
_port = -1;
}
}
}
@@ -146,21 +151,20 @@ public class TCPAddress {
if (_addr != null)
rv += _addr.getHostAddress().hashCode();
else
if (_host != null) rv += _host.hashCode();
if (_host != null) rv += _host.trim().hashCode();
return rv;
}
public boolean equals(Object val) {
if ( (val != null) && (val instanceof TCPAddress) ) {
TCPAddress addr = (TCPAddress)val;
if ( (_addr != null) && (_addr.getHostAddress() != null)
&& (addr.getAddress() != null) && (addr.getAddress().getHostAddress() != null) ) {
return DataHelper.eq(getAddress().getHostAddress(), addr.getAddress().getHostAddress())
&& (getPort() == addr.getPort());
} else {
return DataHelper.eq(getHost(), addr.getHost())
&& (getPort() == addr.getPort());
}
String hostname = null;
if (addr.getHost() != null)
hostname = addr.getHost().trim();
String ourHost = getHost();
if (ourHost != null)
ourHost = ourHost.trim();
return DataHelper.eq(hostname, ourHost) && getPort() == addr.getPort();
}
return false;
}
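The TCPAddress rework is mostly defensive trimming: a published router address whose host or port carries stray whitespace used to trip up InetAddress.getByName() and Integer.parseInt(), and untrimmed hosts also made equals()/hashCode() disagree for otherwise identical addresses. A small sketch of the trim-then-parse idea for the port (values in main() are made up):

    public class TrimmedPortSketch {
        /** Parse a port property, tolerating surrounding whitespace; -1 if invalid. */
        static int parsePort(String port) {
            if (port == null || port.trim().length() == 0)
                return -1;
            try {
                return Integer.parseInt(port.trim());
            } catch (NumberFormatException nfe) {
                return -1;
            }
        }

        public static void main(String[] args) {
            System.out.println(parsePort("8887 "));   // 8887
            System.out.println(parsePort(" 8887"));   // 8887
            System.out.println(parsePort("nope"));    // -1
            // Integer.parseInt("8887 ") without the trim would throw NumberFormatException
        }
    }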

View File

@@ -174,12 +174,17 @@ public class TCPConnection {
}
}
private boolean shouldDropProbabalistically() {
return Boolean.valueOf(_context.getProperty("tcp.dropProbabalistically", "false")).booleanValue();
}
/**
* Implement a probabalistic dropping of messages on the queue to the
* peer along the lines of RFC2309.
*
*/
private void locked_throttle() {
if (!shouldDropProbabalistically()) return;
int bytesQueued = 0;
long earliestExpiration = -1;
for (int i = 0; i < _pendingMessages.size(); i++) {

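With this change the RFC 2309-style early dropping in locked_throttle() only runs when tcp.dropProbabalistically is set to true (it defaults off). As a rough illustration of the general idea only, not the actual I2P drop policy: an early-drop queue discards an incoming message with a probability that rises as the queue fills, instead of waiting for a hard overflow:

    import java.util.ArrayDeque;
    import java.util.Deque;
    import java.util.Random;

    public class EarlyDropQueueSketch {
        private final Deque<String> queue = new ArrayDeque<>();
        private final int capacity;
        private final Random random = new Random();

        EarlyDropQueueSketch(int capacity) { this.capacity = capacity; }

        /** Returns false if the message was dropped early. */
        boolean offer(String msg) {
            double fill = queue.size() / (double) capacity;
            // drop probability grows linearly with queue occupancy
            if (fill >= 1.0 || random.nextDouble() < fill) return false;
            queue.add(msg);
            return true;
        }

        public static void main(String[] args) {
            EarlyDropQueueSketch q = new EarlyDropQueueSketch(10);
            int accepted = 0;
            for (int i = 0; i < 20; i++) if (q.offer("msg" + i)) accepted++;
            System.out.println("accepted " + accepted + " of 20");
        }
    }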
View File

@@ -524,7 +524,7 @@ public class TCPTransport extends TransportImpl {
String port = _context.getProperty(LISTEN_PORT, DEFAULT_LISTEN_PORT+"");
if (port != null) {
try {
int portNum = Integer.parseInt(port);
int portNum = Integer.parseInt(port.trim());
if ( (portNum >= 1) && (portNum < 65535) )
return portNum;
} catch (NumberFormatException nfe) {

View File

@@ -22,6 +22,8 @@ import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.router.TunnelSelectionCriteria;
import net.i2p.router.message.SendTunnelMessageJob;
import net.i2p.stat.RateStat;
import net.i2p.stat.Rate;
import net.i2p.util.Log;
class TestTunnelJob extends JobImpl {
@@ -71,9 +73,57 @@ class TestTunnelJob extends JobImpl {
return false;
}
private final static long TEST_TIMEOUT = 30*1000; // 30 seconds for a test to succeed
private final static long DEFAULT_TEST_TIMEOUT = 10*1000; // 10 seconds for a test to succeed
private final static long DEFAULT_MINIMUM_TEST_TIMEOUT = 5*1000; // 5 second min
private final static int TEST_PRIORITY = 100;
/**
* how long should we let tunnel tests go on for?
*/
private long getTunnelTestTimeout() {
long rv = DEFAULT_TEST_TIMEOUT;
RateStat rs = getContext().statManager().getRate("tunnel.testSuccessTime");
if (rs != null) {
Rate r = rs.getRate(10*60*1000);
if (r != null) {
if (r.getLifetimeEventCount() > 0) {
if (r.getLastEventCount() <= 0)
rv = (long)(r.getLifetimeAverageValue() * getTunnelTestDeviationLimit());
else
rv = (long)(r.getAverageValue() * getTunnelTestDeviationLimit());
}
}
}
long min = getMinimumTestTimeout();
if (rv < min)
rv = min;
return rv;
}
/**
* How much greater than the current average tunnel test time should we accept?
*/
private double getTunnelTestDeviationLimit() {
try {
return Double.parseDouble(getContext().getProperty("router.tunnelTestDeviation", "2.0"));
} catch (NumberFormatException nfe) {
return 2.0;
}
}
private long getMinimumTestTimeout() {
String timeout = getContext().getProperty("router.tunnelTestMinimum", ""+DEFAULT_MINIMUM_TEST_TIMEOUT);
if (timeout != null) {
try {
return Long.parseLong(timeout);
} catch (NumberFormatException nfe) {
return DEFAULT_MINIMUM_TEST_TIMEOUT;
}
} else {
return DEFAULT_MINIMUM_TEST_TIMEOUT;
}
}
/**
* Send a message out the tunnel with instructions to send the message back
* to ourselves and wait for it to arrive.
@@ -96,7 +146,7 @@ class TestTunnelJob extends JobImpl {
TestFailedJob failureJob = new TestFailedJob();
MessageSelector selector = new TestMessageSelector(msg.getMessageId(), info.getTunnelId().getTunnelId());
SendTunnelMessageJob testJob = new SendTunnelMessageJob(getContext(), msg, info.getTunnelId(), us, _secondaryId, null, new TestSuccessfulJob(), failureJob, selector, TEST_TIMEOUT, TEST_PRIORITY);
SendTunnelMessageJob testJob = new SendTunnelMessageJob(getContext(), msg, info.getTunnelId(), us, _secondaryId, null, new TestSuccessfulJob(), failureJob, selector, getTunnelTestTimeout(), TEST_PRIORITY);
getContext().jobQueue().addJob(testJob);
}
@@ -121,7 +171,7 @@ class TestTunnelJob extends JobImpl {
TestFailedJob failureJob = new TestFailedJob();
MessageSelector selector = new TestMessageSelector(msg.getMessageId(), info.getTunnelId().getTunnelId());
SendTunnelMessageJob j = new SendTunnelMessageJob(getContext(), msg, _secondaryId, info.getThisHop(), info.getTunnelId(), null, new TestSuccessfulJob(), failureJob, selector, TEST_TIMEOUT, TEST_PRIORITY);
SendTunnelMessageJob j = new SendTunnelMessageJob(getContext(), msg, _secondaryId, info.getThisHop(), info.getTunnelId(), null, new TestSuccessfulJob(), failureJob, selector, getTunnelTestTimeout(), TEST_PRIORITY);
getContext().jobQueue().addJob(j);
}
@@ -216,6 +266,11 @@ class TestTunnelJob extends JobImpl {
if (_log.shouldLog(Log.INFO))
_log.info("Test of tunnel " + _primaryId + " successful after "
+ time + "ms waiting for " + _nonce);
if (time > getTunnelTestTimeout()) {
return; // the test failed job should already have run
}
TunnelInfo info = _pool.getTunnelInfo(_primaryId);
if (info != null) {
TestTunnelJob.this.getContext().messageHistory().tunnelValid(info, time);
@@ -254,7 +309,7 @@ class TestTunnelJob extends JobImpl {
_id = id;
_tunnelId = tunnelId;
_found = false;
_expiration = getContext().clock().now() + TEST_TIMEOUT;
_expiration = getContext().clock().now() + getTunnelTestTimeout();
if (_log.shouldLog(Log.DEBUG))
_log.debug("the expiration while testing tunnel " + tunnelId
+ " waiting for nonce " + id + ": " + new Date(_expiration));
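Putting the new pieces together: the tunnel test timeout is now the recent average of tunnel.testSuccessTime (falling back to the lifetime average) multiplied by router.tunnelTestDeviation (default 2.0), floored at router.tunnelTestMinimum (default 5 seconds), replacing the old flat 30-second value. A standalone sketch of just that arithmetic, with the RateStat lookups replaced by plain parameters:

    public class TunnelTestTimeoutSketch {
        static final long DEFAULT_TEST_TIMEOUT = 10 * 1000;
        static final long DEFAULT_MINIMUM = 5 * 1000;

        /** avgSuccessMs <= 0 means no test data yet. */
        static long testTimeout(double avgSuccessMs, double deviationLimit, long minimumMs) {
            long rv = DEFAULT_TEST_TIMEOUT;
            if (avgSuccessMs > 0)
                rv = (long) (avgSuccessMs * deviationLimit);
            return Math.max(rv, minimumMs);
        }

        public static void main(String[] args) {
            System.out.println(testTimeout(1800, 2.0, DEFAULT_MINIMUM)); // 5000 (floored)
            System.out.println(testTimeout(4000, 2.0, DEFAULT_MINIMUM)); // 8000
            System.out.println(testTimeout(-1, 2.0, DEFAULT_MINIMUM));   // 10000
        }
    }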