Compare commits


37 Commits

Author SHA1 Message Date
jrandom
4c59cd7621 * 2006-10-10 0.6.1.26 released
2006-10-10  jrandom
    * Removed the status display from the console, as it's more confusing
      than informative (though the content is still displayed in the HTML)
2006-10-09 01:44:47 +00:00
complication
ef707e7956 2006-10-08 Complication
* Update comment to reflect current status
2006-10-08 23:10:58 +00:00
complication
73cf3fb299 2006-10-08 Complication
* Add a framed average peer clock skew calculator
    * Add config property "router.clockOffsetSanityCheck" to determine
      if NTP-suggested clock offsets get sanity checked (default "true")
    * Reject NTP-suggested clock offsets if they'd increase peer clock skew
      by more than 5 seconds, or make it more than 20 seconds total
    * Decrease log level in getMedianPeerClockSkew()
2006-10-08 22:52:59 +00:00
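
The rejection rule described in this entry boils down to a small check. A minimal sketch, assuming skews are handled in seconds; the class, method, and constant names are illustrative, and the actual RouterClock code is not part of this diff:

    // Sketch only: names and structure are assumptions, not the real RouterClock API.
    public class OffsetSanityCheckSketch {
        private static final long MAX_SKEW_INCREASE_SEC = 5;   // don't worsen peer clock skew by more than 5 seconds
        private static final long MAX_TOTAL_SKEW_SEC = 20;     // never end up more than 20 seconds off the peers

        /**
         * @param currentSkewSec  median skew against peers with the current offset applied
         * @param proposedSkewSec median skew against peers if the NTP-suggested offset were applied
         * @return true if the NTP-suggested offset passes the sanity check
         */
        public static boolean acceptNtpOffset(long currentSkewSec, long proposedSkewSec) {
            long increase = Math.abs(proposedSkewSec) - Math.abs(currentSkewSec);
            if (increase > MAX_SKEW_INCREASE_SEC)
                return false;
            return Math.abs(proposedSkewSec) <= MAX_TOTAL_SKEW_SEC;
        }
    }
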
zzz
80b0c97d72 (zzz) 10/3 status notes 2006-10-04 19:41:09 +00:00
zzz
5cf85c1d7b (zzz)
* i2psnark: Second try at synchronization fix - synch addRequest()
      completely rather than just portions of it and requestNextPiece()
2006-09-29 23:54:17 +00:00
jrandom
c14e52ceb5 2006-09-27 jrandom
* added HMAC-SHA256
    * properly use CRLF with EepPost
    * suppress jbigi/jcpuid messages if jbigi.dontLog/jcpuid.dontLog is set
    * PBE session key generation (with 1000 rounds of SHA256)
    * misc SDK helper functions
2006-09-27 06:00:33 +00:00
complication
32a579e480 2006-09-26 Complication
* Take back another inadvertent logging change in NTCPConnection
2006-09-27 04:44:13 +00:00
complication
0a240a4436 2006-09-26 Complication
* Take back an accidental log level change
2006-09-27 04:31:34 +00:00
complication
9325b806e4 2006-09-26 Complication
* Subclass from Clock a RouterClock which can access router transports,
      with the goal of developing it to second-guess NTP results
    * Make transports report clock skew in seconds
    * Adjust renderStatusHTML() methods accordingly
    * Show average for NTCP clock skews too
    * Give transports a getClockSkews() method to report clock skews
    * Give transport manager a getClockSkews() method to aggregate results
    * Give comm system facade a getMedianPeerClockSkew() method which RouterClock calls
      (to observe results, add "net.i2p.router.transport.CommSystemFacadeImpl=WARN" to
logging)
    * Extra explicitness in NTCP classes to denote unit of time.
    * Fix some places in NTCPConnection where milliseconds and seconds were confused
2006-09-27 04:02:13 +00:00
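
A minimal sketch of the aggregation this entry describes, assuming the transports contribute one clock-skew sample per peer, in seconds; the real getClockSkews()/getMedianPeerClockSkew() implementations live in the transport classes and CommSystemFacadeImpl, which are not shown in this diff:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    // Sketch only: illustrates the median step, not the actual transport plumbing.
    public class MedianSkewSketch {
        /** @param skewsSeconds one clock-skew sample per connected peer; may be empty */
        public static Long medianPeerClockSkew(List<Long> skewsSeconds) {
            if (skewsSeconds.isEmpty())
                return null;                          // nothing to compare against yet
            List<Long> sorted = new ArrayList<Long>(skewsSeconds);
            Collections.sort(sorted);
            return sorted.get(sorted.size() / 2);     // middle element; upper median for even counts
        }
    }
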
zzz
ef2e24ea11 (zzz)
* i2psnark: Paranoid copy before writing pieces,
      recheck files on completion, redownload bad pieces
    * i2psnark: Don't contact tracker as often when seeding
2006-09-26 03:11:39 +00:00
zzz
373934c6e0 (zzz)
* i2psnark: Add some synchronization to prevent rare problem
      after restoring orphan piece
2006-09-24 18:30:22 +00:00
zzz
e8e8bac694 (zzz)
* i2psnark: Eliminate duplicate requests caused by i2p-bt's
      rapid choke/unchokes
    * i2psnark: Truncate long TrackerErr messages on web page
2006-09-20 22:39:24 +00:00
zzz
23e8a558c2 (zzz)
* i2psnark: Implement retransmission of requests. This
      eliminates one cause of complete stalls with a peer.
      This problem is common on torrents with a small number of
      active peers where there are no choke/unchokes to kickstart things.
2006-09-16 21:07:28 +00:00
complication
46f2645834 2006-09-14 Complication
* news.xml update
2006-09-14 03:16:53 +00:00
zzz
2329439034 (zzz)
* i2psnark: Fix restoral of partial pieces broken by last patch
2006-09-14 02:37:32 +00:00
zzz
6d400368b9 (zzz) changelog date fix 2006-09-13 23:24:14 +00:00
zzz
26c13b40fe (zzz)
* i2psnark: Mark a peer's requests as unrequested on disconnect,
      preventing premature end game
    * i2psnark: Randomize selection of next piece during end game
    * i2psnark: Don't restore a partial piece to a peer that is already working on it
    * i2psnark: strip ".torrent" on web page
    * i2psnark: Limit piece size in generated torrent to 1MB max
2006-09-13 23:02:07 +00:00
zzz
9fd0e95fe8 (zzz) 9/12 status 2006-09-13 17:34:58 +00:00
zzz
7e21f2c92b (zzz)
* i2psnark: Add "Stalled" indication and stat totals on web page
2006-09-10 01:55:37 +00:00
zzz
c9d8e796c6 (zzz)
* i2psnark: Fix bug where new peers would always be set to "interested"
      regardless of actual interest
    * i2psnark: Reduce max piece size from 10MB to 1MB; larger may have severe
      memory and efficiency problems
2006-09-09 22:15:05 +00:00
zzz
e7203f5d46 (zzz) 0.6.1.25 2006-09-09 21:19:49 +00:00
jrandom
22d76a1b64 * 2006-09-09 0.6.1.25 released 2006-09-09 17:46:21 +00:00
jrandom
0903dc46c6 2006-09-08 jrandom
* Tweak the PRNG logging so it only displays error messages if there are
      problems
    * Disable dynamic router keys for the time being, as they don't offer
      meaningful security, may hurt the router, and make it harder to
      determine the network health.  The code to restart on SSU IP change is
      still enabled however.
    * Disable tunnel load testing, leaning back on the tiered selection for
      the time being.
    * Spattering of bugfixes
2006-09-09 01:41:57 +00:00
zzz
0f56ec8078 (zzz) oops remove duplicate 2006-09-07 23:26:53 +00:00
zzz
70ee1df2bf (zzz)
* i2psnark: Increase output timeout from 2 min to 4 min
    * i2psnark: Orphan debug msg cleanup
    * i2psnark: More web rate report cleanup
2006-09-07 23:22:12 +00:00
zzz
61a6a29bec cCVS: ---------------------------------------------------------------------- 2006-09-07 23:03:18 +00:00
zzz
678f7d8f72 (zzz)
* i2psnark: Implement basic partial-piece saves across connections
    * i2psnark: Implement keep-alive sending. This will keep non-i2psnark clients
      from dropping us for inactivity but also renders the 2-minute transmit-inactivity
      code in i2psnark ineffective. Will have to research why there is transmit but
      not receive inactivity code. With the current connection limit of 24 peers
      we aren't in any danger of keeping out new peers by keeping inactive ones.
    * i2psnark: Increase CHECK_PERIOD from 20 to 40 since nothing happens in 20 seconds
    * i2psnark: Fix dropped chunk handling
    * i2psnark: Web rate report cleanup
2006-09-06 06:32:53 +00:00
zzz
b92ee364bc (zzz)
* i2psnark: Report cleared trackerErr immediately
    * i2psnark: Add trackerErr reporting after previous success; retry more quickly
    * i2psnark: Set up new connections more quickly
    * i2psnark: Don't delay tracker fetch when setting up lots of connections
    * i2psnark: Reduce MAX_UPLOADERS from 12 to 4
2006-09-04 08:26:21 +00:00
zzz
aef19fcd38 (zzz) i2psnark: enable pipelining, set tunnel length default to 1 + 0-1 2006-09-04 06:01:53 +00:00
zzz
3b01df1d2c (zzz) Add rate reporting to i2psnark 2006-09-03 09:12:22 +00:00
complication
4aed23b198 2006-09-03 Complication
* Limit form size in SusiDNS to avoid exceeding a POST size limit on postback
    * Print messages about addressbook size to give better overview
    * Enable delete function in published addressbook
2006-09-03 06:37:46 +00:00
complication
03e8875c27 2006-08-21 Complication
* Fix error reporting discrepancy (thanks for helping notice, yojoe!)
2006-08-21 05:55:33 +00:00
complication
48921a0875 2006-08-03 Complication
* news.xml update
2006-08-04 00:49:40 +00:00
jrandom
633fabb09e 2006-08-03 jrandom
* Decrease the recently modified tunnel building timeout, though keep
      the scaling on their processing
2006-08-03 22:34:24 +00:00
jrandom
bc42c26d94 2006-07-31 jrandom
* Increase the tunnel building timeout
    * Avoid a rare race (thanks bar!)
    * Fix the bandwidth capacity publishing code to factor in share percentage
      and outbound throttling (oops)
2006-08-01 02:26:52 +00:00
complication
3c09ca3359 2006-07-29 Complication
* Treat NTP responses from unexpected stratums like failures
2006-07-30 05:08:20 +00:00
zzz
1e9e7dd345 (zzz) 0.6.1.24 2006-07-29 23:02:57 +00:00
55 changed files with 1306 additions and 234 deletions

View File

@@ -318,6 +318,14 @@ public class Peer implements Comparable
PeerState s = state;
if (s != null)
{
// try to save partial piece
if (this.deregister) {
PeerListener p = state.listener;
if (p != null) {
p.savePeerPartial(state);
p.markUnrequested(this);
}
}
state = null;
PeerConnectionIn in = s.in;
@@ -458,4 +466,25 @@ public class Peer implements Comparable
return -1; //"no state";
}
}
/**
* Send keepalive
*/
public void keepAlive()
{
PeerState s = state;
if (s != null)
s.keepAlive();
}
/**
* Retransmit outstanding requests if necessary
*/
public void retransmitRequests()
{
PeerState s = state;
if (s != null)
s.retransmitRequests();
}
}

View File

@@ -89,8 +89,6 @@ class PeerCheckerTask extends TimerTask
if (peer.isChoked())
choked++;
// XXX - We should calculate the up/download rate a bit
// more intelligently
long upload = peer.getUploaded();
uploaded += upload;
long download = peer.getDownloaded();
@@ -166,6 +164,8 @@ class PeerCheckerTask extends TimerTask
worstDownloader = peer;
}
}
peer.retransmitRequests();
peer.keepAlive();
}
// Resync actual uploaders value
@@ -196,6 +196,10 @@ class PeerCheckerTask extends TimerTask
// Put peers back at the end of the list that we removed earlier.
coordinator.peers.addAll(removed);
coordinator.peerCount = coordinator.peers.size();
// store the rates
coordinator.setRateHistory(uploaded, downloaded);
}
if (coordinator.halted()) {
cancel();

View File

@@ -259,7 +259,13 @@ class PeerConnectionOut implements Runnable
{
Message m = new Message();
m.type = Message.KEEP_ALIVE;
addMessage(m);
// addMessage(m);
synchronized(sendQueue)
{
if(sendQueue.isEmpty())
sendQueue.add(m);
sendQueue.notifyAll();
}
}
void sendChoke(boolean choke)
@@ -318,6 +324,23 @@ class PeerConnectionOut implements Runnable
addMessage(m);
}
/** retransmit requests not received in 7m */
private static final int REQ_TIMEOUT = (2 * SEND_TIMEOUT) + (60 * 1000);
void retransmitRequests(List requests)
{
long now = System.currentTimeMillis();
Iterator it = requests.iterator();
while (it.hasNext())
{
Request req = (Request)it.next();
if(now > req.sendTime + REQ_TIMEOUT) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Retransmit request " + req + " to peer " + peer);
sendRequest(req);
}
}
}
void sendRequests(List requests)
{
Iterator it = requests.iterator();
@@ -330,12 +353,30 @@ class PeerConnectionOut implements Runnable
void sendRequest(Request req)
{
// Check for duplicate requests to deal with fibrillating i2p-bt
// (multiple choke/unchokes received cause duplicate requests in the queue)
synchronized(sendQueue)
{
Iterator it = sendQueue.iterator();
while (it.hasNext())
{
Message m = (Message)it.next();
if (m.type == Message.REQUEST && m.piece == req.piece &&
m.begin == req.off && m.length == req.len)
{
if (_log.shouldLog(Log.DEBUG))
_log.debug("Discarding duplicate request " + req + " to peer " + peer);
return;
}
}
}
Message m = new Message();
m.type = Message.REQUEST;
m.piece = req.piece;
m.begin = req.off;
m.length = req.len;
addMessage(m);
req.sendTime = System.currentTimeMillis();
}
void sendPiece(int piece, int begin, int length, byte[] bytes)
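
A note on the REQ_TIMEOUT arithmetic above: SEND_TIMEOUT itself is not shown in this diff, but for the stated 7-minute figure to hold it would have to be 3 minutes, giving (2 * 180000) + 60000 = 420000 ms, i.e. a request is retransmitted if no matching piece message has arrived within 7 minutes of sending it.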

View File

@@ -37,9 +37,9 @@ public class PeerCoordinator implements PeerListener
final Snark snark;
// package local for access by CheckDownLoadersTask
final static long CHECK_PERIOD = 20*1000; // 20 seconds
final static long CHECK_PERIOD = 40*1000; // 40 seconds
final static int MAX_CONNECTIONS = 24;
final static int MAX_UPLOADERS = 12; // i2p: might as well balance it out
final static int MAX_UPLOADERS = 4;
// Approximation of the number of current uploaders.
// Resynced by PeerChecker once in a while.
@@ -50,6 +50,9 @@ public class PeerCoordinator implements PeerListener
private long uploaded;
private long downloaded;
final static int RATE_DEPTH = 6; // make following arrays RATE_DEPTH long
private long uploaded_old[] = {0,0,0,0,0,0};
private long downloaded_old[] = {0,0,0,0,0,0};
// synchronize on this when changing peers or downloaders
final List peers = new ArrayList();
@@ -62,7 +65,7 @@ public class PeerCoordinator implements PeerListener
private final byte[] id;
// Some random wanted pieces
private final List wantedPieces;
private List wantedPieces;
private boolean halted = false;
@@ -80,6 +83,15 @@ public class PeerCoordinator implements PeerListener
this.listener = listener;
this.snark = torrent;
setWantedPieces();
// Install a timer to check the uploaders.
timer.schedule(new PeerCheckerTask(this), CHECK_PERIOD, CHECK_PERIOD);
}
// only called externally from Storage after the double-check fails
public void setWantedPieces()
{
// Make a list of pieces
wantedPieces = new ArrayList();
BitField bitfield = storage.getBitField();
@@ -87,11 +99,8 @@ public class PeerCoordinator implements PeerListener
if (!bitfield.get(i))
wantedPieces.add(new Piece(i));
Collections.shuffle(wantedPieces);
// Install a timer to check the uploaders.
timer.schedule(new PeerCheckerTask(this), CHECK_PERIOD, CHECK_PERIOD);
}
public Storage getStorage() { return storage; }
public CoordinatorListener getListener() { return listener; }
@@ -142,6 +151,43 @@ public class PeerCoordinator implements PeerListener
return downloaded;
}
/**
* Push the total uploaded/downloaded onto a RATE_DEPTH deep stack
*/
public void setRateHistory(long up, long down)
{
for (int i = RATE_DEPTH-1; i > 0; i--){
uploaded_old[i] = uploaded_old[i-1];
downloaded_old[i] = downloaded_old[i-1];
}
uploaded_old[0] = up;
downloaded_old[0] = down;
}
/**
* Returns the 4-minute-average rate in Bps
*/
public long getDownloadRate()
{
long rate = 0;
for (int i = 0; i < RATE_DEPTH; i++){
rate += downloaded_old[i];
}
return rate / (RATE_DEPTH * CHECK_PERIOD / 1000);
}
/**
* Returns the 4-minute-average rate in Bps
*/
public long getUploadRate()
{
long rate = 0;
for (int i = 0; i < RATE_DEPTH; i++){
rate += uploaded_old[i];
}
return rate / (RATE_DEPTH * CHECK_PERIOD / 1000);
}
public MetaInfo getMetaInfo()
{
return metainfo;
@@ -191,8 +237,10 @@ public class PeerCoordinator implements PeerListener
synchronized(peers)
{
Peer old = peerIDInList(peer.getPeerID(), peers);
if ( (old != null) && (old.getInactiveTime() > 2*60*1000) ) {
// idle for 2 minutes, kill the old con
if ( (old != null) && (old.getInactiveTime() > 4*60*1000) ) {
// idle for 4 minutes, kill the old con (64KB/4min = 273B/sec minimum for one block)
if (_log.shouldLog(Log.WARN))
_log.warn("Removing old peer: " + peer + ": " + old + ", inactive for " + old.getInactiveTime());
peers.remove(old);
toDisconnect = old;
old = null;
@@ -235,12 +283,13 @@ public class PeerCoordinator implements PeerListener
return null;
}
public void addPeer(final Peer peer)
// returns true if actual attempt to add peer occurs
public boolean addPeer(final Peer peer)
{
if (halted)
{
peer.disconnect(false);
return;
return false;
}
boolean need_more;
@@ -265,6 +314,7 @@ public class PeerCoordinator implements PeerListener
};
String threadName = peer.toString();
new I2PThread(r, threadName).start();
return true;
}
else
if (_log.shouldLog(Log.DEBUG)) {
@@ -274,6 +324,7 @@ public class PeerCoordinator implements PeerListener
_log.info("MAX_CONNECTIONS = " + MAX_CONNECTIONS
+ " not accepting extra peer: " + peer);
}
return false;
}
@@ -351,9 +402,10 @@ public class PeerCoordinator implements PeerListener
{
Piece p = (Piece)it.next();
int i = p.getId();
if (bitfield.get(i))
if (bitfield.get(i)) {
p.addPeer(peer);
return true;
}
}
}
return false;
@@ -392,6 +444,8 @@ public class PeerCoordinator implements PeerListener
//Only request a piece we've requested before if there's no other choice.
if (piece == null) {
// let's not all get on the same piece
Collections.shuffle(requested);
Iterator it2 = requested.iterator();
while (piece == null && it2.hasNext())
{
@@ -403,9 +457,17 @@ public class PeerCoordinator implements PeerListener
}
if (piece == null) {
if (_log.shouldLog(Log.WARN))
_log.warn("nothing to even rerequest from " + peer + ": requested = " + requested
+ " wanted = " + wantedPieces + " peerHas = " + havePieces);
_log.warn("nothing to even rerequest from " + peer + ": requested = " + requested);
// _log.warn("nothing to even rerequest from " + peer + ": requested = " + requested
// + " wanted = " + wantedPieces + " peerHas = " + havePieces);
return -1; //If we still can't find a piece we want, so be it.
} else {
// Should be a lot smarter here - limit # of parallel attempts and
// share blocks rather than starting from 0 with each peer.
// This is where the flaws of the snark data model are really exposed.
// Could also randomize within the duplicate set rather than strict rarest-first
if (_log.shouldLog(Log.DEBUG))
_log.debug("parallel request (end game?) for " + peer + ": piece = " + piece);
}
}
piece.setRequested(true);
@@ -582,4 +644,124 @@ public class PeerCoordinator implements PeerListener
}
}
}
/** Simple method to save a partial piece on peer disconnection
* and hopefully restart it later.
* Only one partial piece is saved at a time.
* Replace it if a new one is bigger or the old one is too old.
* Storage method is private so we can expand to save multiple partials
* if we wish.
*/
private Request savedRequest = null;
private long savedRequestTime = 0;
public void savePeerPartial(PeerState state)
{
Request req = state.getPartialRequest();
if (req == null)
return;
if (savedRequest == null ||
req.off > savedRequest.off ||
System.currentTimeMillis() > savedRequestTime + (15 * 60 * 1000)) {
if (savedRequest == null || (req.piece != savedRequest.piece && req.off != savedRequest.off)) {
if (_log.shouldLog(Log.DEBUG)) {
_log.debug(" Saving orphaned partial piece " + req);
if (savedRequest != null)
_log.debug(" (Discarding previously saved orphan) " + savedRequest);
}
}
savedRequest = req;
savedRequestTime = System.currentTimeMillis();
} else {
if (req.piece != savedRequest.piece)
if (_log.shouldLog(Log.DEBUG))
_log.debug(" Discarding orphaned partial piece " + req);
}
}
/** Return partial piece if it's still wanted and peer has it.
*/
public Request getPeerPartial(BitField havePieces) {
if (savedRequest == null)
return null;
if (! havePieces.get(savedRequest.piece)) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Peer doesn't have orphaned piece " + savedRequest);
return null;
}
synchronized(wantedPieces)
{
for(Iterator iter = wantedPieces.iterator(); iter.hasNext(); ) {
Piece piece = (Piece)iter.next();
if (piece.getId() == savedRequest.piece) {
Request req = savedRequest;
piece.setRequested(true);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Restoring orphaned partial piece " + req);
savedRequest = null;
return req;
}
}
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("We no longer want orphaned piece " + savedRequest);
savedRequest = null;
return null;
}
/** Clear the requested flag for a piece if the peer
** is the only one requesting it
*/
private void markUnrequestedIfOnlyOne(Peer peer, int piece)
{
// see if anybody else is requesting
synchronized (peers)
{
Iterator it = peers.iterator();
while (it.hasNext()) {
Peer p = (Peer)it.next();
if (p.equals(peer))
continue;
if (p.state == null)
continue;
int[] arr = p.state.getRequestedPieces();
for (int i = 0; arr[i] >= 0; i++)
if(arr[i] == piece) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Another peer is requesting piece " + piece);
return;
}
}
}
// nobody is, so mark unrequested
synchronized(wantedPieces)
{
Iterator it = wantedPieces.iterator();
while (it.hasNext()) {
Piece p = (Piece)it.next();
if (p.getId() == piece) {
p.setRequested(false);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Removing from request list piece " + piece);
return;
}
}
}
}
/** Mark a peer's requested pieces unrequested when it is disconnected
** Once for each piece
** This is enough trouble, maybe would be easier just to regenerate
** the requested list from scratch instead.
*/
public void markUnrequested(Peer peer)
{
if (peer.state == null)
return;
int[] arr = peer.state.getRequestedPieces();
for (int i = 0; arr[i] >= 0; i++)
markUnrequestedIfOnlyOne(peer, arr[i]);
}
}
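
To make the 4-minute average concrete, a small worked example (the sample values are assumed, not from a real run; in the actual code PeerCheckerTask feeds setRateHistory() once per CHECK_PERIOD):

    // Sketch: RATE_DEPTH = 6 samples at CHECK_PERIOD = 40s gives a 240-second window,
    // which is where the "4-minute-average" in the javadoc comes from.
    public class RateSketch {
        public static void main(String[] args) {
            long[] downloadedOld = { 200000, 180000, 220000, 190000, 210000, 200000 }; // bytes per 40s interval (assumed)
            long sum = 0;
            for (long bytes : downloadedOld)
                sum += bytes;
            long bps = sum / (6 * 40);   // 1200000 / 240 = 5000 Bps, shown as the down rate on the web page
            System.out.println(bps + " Bps");
        }
    }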

View File

@@ -142,4 +142,30 @@ public interface PeerListener
* we are no longer interested in the peer.
*/
int wantPiece(Peer peer, BitField bitfield);
/**
* Called when the peer has disconnected and the peer task may have a partially
* downloaded piece that the PeerCoordinator can save
*
* @param state the PeerState for the peer
*/
void savePeerPartial(PeerState state);
/**
* Called when a peer has connected and there may be a partially
* downloaded piece that the coordinator can give the peer task
*
* @param havePieces the have-pieces bitmask for the peer
*
* @return request (contains the partial data and valid length)
*/
Request getPeerPartial(BitField havePieces);
/** Mark a peer's requested pieces unrequested when it is disconnected
* This prevents premature end game
*
* @param peer the peer that is disconnecting
*/
void markUnrequested(Peer peer);
}

View File

@@ -62,7 +62,7 @@ class PeerState
// If we have to resend outstanding requests (true after we got choked).
private boolean resend = false;
private final static int MAX_PIPELINE = 1;
private final static int MAX_PIPELINE = 2;
private final static int PARTSIZE = 64*1024; // default was 16K, i2p-bt uses 64KB
PeerState(Peer peer, PeerListener listener, MetaInfo metainfo,
@@ -221,6 +221,10 @@ class PeerState
listener.uploaded(peer, size);
}
// This is used to flag that we have to back up from the firstOutstandingRequest
// when calculating how far we've gotten
Request pendingRequest = null;
/**
* Called when a partial piece request has been handled by
* PeerConnectionIn.
@@ -231,6 +235,8 @@ class PeerState
downloaded += size;
listener.downloaded(peer, size);
pendingRequest = null;
// Last chunk needed for this piece?
if (getFirstOutstandingRequest(req.piece) == -1)
{
@@ -318,14 +324,8 @@ class PeerState
{
Request dropReq = (Request)outstandingRequests.remove(0);
outstandingRequests.add(dropReq);
// We used to rerequest the missing chunks but that mostly
// just confuses the other side. So now we just keep
// waiting for them. They will be rerequested when we get
// choked/unchoked again.
/*
if (!choked)
if (!choked)
out.sendRequest(dropReq);
*/
if (_log.shouldLog(Log.WARN))
_log.warn("dropped " + dropReq + " with peer " + peer);
}
@@ -336,10 +336,58 @@ class PeerState
// Request more if necessary to keep the pipeline filled.
addRequest();
pendingRequest = req;
return req;
}
// get longest partial piece
Request getPartialRequest()
{
Request req = null;
for (int i = 0; i < outstandingRequests.size(); i++) {
Request r1 = (Request)outstandingRequests.get(i);
int j = getFirstOutstandingRequest(r1.piece);
if (j == -1)
continue;
Request r2 = (Request)outstandingRequests.get(j);
if (r2.off > 0 && ((req == null) || (r2.off > req.off)))
req = r2;
}
if (pendingRequest != null && req != null && pendingRequest.off < req.off) {
if (pendingRequest.off != 0)
req = pendingRequest;
else
req = null;
}
return req;
}
// return array of pieces terminated by -1
// remove most duplicates
// but still could be some duplicates, not guaranteed
int[] getRequestedPieces()
{
int size = outstandingRequests.size();
int[] arr = new int[size+2];
int pc = -1;
int pos = 0;
if (pendingRequest != null) {
pc = pendingRequest.piece;
arr[pos++] = pc;
}
Request req = null;
for (int i = 0; i < size; i++) {
Request r1 = (Request)outstandingRequests.get(i);
if (pc != r1.piece) {
pc = r1.piece;
arr[pos++] = pc;
}
}
arr[pos] = -1;
return(arr);
}
void cancelMessage(int piece, int begin, int length)
{
if (_log.shouldLog(Log.DEBUG))
@@ -414,16 +462,12 @@ class PeerState
/**
* Adds a new request to the outstanding requests list.
*/
private void addRequest()
synchronized private void addRequest()
{
boolean more_pieces = true;
while (more_pieces)
{
synchronized(this)
{
more_pieces = outstandingRequests.size() < MAX_PIPELINE;
}
more_pieces = outstandingRequests.size() < MAX_PIPELINE;
// We want something and we don't have outstanding requests?
if (more_pieces && lastRequest == null)
more_pieces = requestNextPiece();
@@ -431,19 +475,14 @@ class PeerState
{
int pieceLength;
boolean isLastChunk;
synchronized(this)
{
pieceLength = metainfo.getPieceLength(lastRequest.piece);
isLastChunk = lastRequest.off + lastRequest.len == pieceLength;
}
pieceLength = metainfo.getPieceLength(lastRequest.piece);
isLastChunk = lastRequest.off + lastRequest.len == pieceLength;
// Last part of a piece?
if (isLastChunk)
more_pieces = requestNextPiece();
else
{
synchronized(this)
{
int nextPiece = lastRequest.piece;
int nextBegin = lastRequest.off + PARTSIZE;
byte[] bs = lastRequest.bs;
@@ -456,7 +495,6 @@ class PeerState
if (!choked)
out.sendRequest(req);
lastRequest = req;
}
}
}
}
@@ -472,11 +510,29 @@ class PeerState
// Check that we already know what the other side has.
if (bitfield != null)
{
// Check for adopting an orphaned partial piece
Request r = listener.getPeerPartial(bitfield);
if (r != null) {
// Check that r not already in outstandingRequests
int[] arr = getRequestedPieces();
boolean found = false;
for (int i = 0; arr[i] >= 0; i++) {
if (arr[i] == r.piece) {
found = true;
break;
}
}
if (!found) {
outstandingRequests.add(r);
if (!choked)
out.sendRequest(r);
lastRequest = r;
return true;
}
}
int nextPiece = listener.wantPiece(peer, bitfield);
if (_log.shouldLog(Log.DEBUG))
_log.debug(peer + " want piece " + nextPiece);
synchronized(this)
{
if (nextPiece != -1
&& (lastRequest == null || lastRequest.piece != nextPiece))
{
@@ -491,7 +547,6 @@ class PeerState
lastRequest = req;
return true;
}
}
}
return false;
@@ -523,4 +578,15 @@ class PeerState
out.sendChoke(choke);
}
}
void keepAlive()
{
out.sendAlive();
}
synchronized void retransmitRequests()
{
if (interesting && !choked)
out.retransmitRequests(outstandingRequests);
}
}

View File

@@ -29,6 +29,7 @@ class Request
final byte[] bs;
final int off;
final int len;
long sendTime;
/**
* Creates a new Request.

View File

@@ -693,6 +693,11 @@ public class Snark
completeListener.torrentComplete(this);
}
public void setWantedPieces(Storage storage)
{
coordinator.setWantedPieces();
}
public void shutdown()
{
// Should not be necessary since all non-daemon threads should

View File

@@ -91,6 +91,8 @@ public class SnarkManager implements Snark.CompleteListener {
_config.setProperty(PROP_I2CP_HOST, "localhost");
if (!_config.containsKey(PROP_I2CP_PORT))
_config.setProperty(PROP_I2CP_PORT, "7654");
if (!_config.containsKey(PROP_I2CP_OPTS))
_config.setProperty(PROP_I2CP_OPTS, "inbound.length=1 inbound.lengthVariance=1 outbound.length=1 outbound.lengthVariance=1");
if (!_config.containsKey(PROP_EEP_HOST))
_config.setProperty(PROP_EEP_HOST, "localhost");
if (!_config.containsKey(PROP_EEP_PORT))
@@ -333,7 +335,7 @@ public class SnarkManager implements Snark.CompleteListener {
return "Too many files in " + info.getName() + " (" + files.size() + "), deleting it";
} else if (info.getPieces() <= 0) {
return "No pieces in " + info.getName() + "? deleting it";
} else if (info.getPieceLength(0) > 10*1024*1024) {
} else if (info.getPieceLength(0) > 1*1024*1024) {
return "Pieces are too large in " + info.getName() + " (" + info.getPieceLength(0)/1024 + "KB, deleting it";
} else if (info.getTotalLength() > 10*1024*1024*1024l) {
System.out.println("torrent info: " + info.toString());

View File

@@ -39,7 +39,7 @@ public class Storage
private final StorageListener listener;
private final BitField bitfield; // BitField to represent the pieces
private BitField bitfield; // BitField to represent the pieces
private int needed; // Number of pieces needed
// XXX - Not always set correctly
@@ -48,6 +48,7 @@ public class Storage
/** The default piece size. */
private static int MIN_PIECE_SIZE = 256*1024;
private static int MAX_PIECE_SIZE = 1024*1024;
/** The maximum number of pieces in a torrent. */
private static long MAX_PIECES = 100*1024/20;
@@ -90,7 +91,7 @@ public class Storage
piece_size = MIN_PIECE_SIZE;
pieces = (int) ((total - 1)/piece_size) + 1;
while (pieces > MAX_PIECES)
while (pieces > MAX_PIECES && piece_size < MAX_PIECE_SIZE)
{
piece_size = piece_size*2;
pieces = (int) ((total - 1)/piece_size) +1;
@@ -482,10 +483,11 @@ public class Storage
* matches), otherwise false.
* @exception IOException when some storage related error occurs.
*/
public boolean putPiece(int piece, byte[] bs) throws IOException
public boolean putPiece(int piece, byte[] ba) throws IOException
{
// First check if the piece is correct.
// If we were paranoia we could copy the array first.
// Copy the array first to be paranoid.
byte[] bs = (byte[]) ba.clone();
int length = bs.length;
boolean correctHash = metainfo.checkPiece(piece, bs, 0, length);
if (listener != null)
@@ -504,6 +506,7 @@ public class Storage
needed--;
complete = needed == 0;
}
}
// Early typecast, avoid possibly overflowing a temp integer
@@ -538,9 +541,25 @@ public class Storage
}
if (complete) {
listener.storageCompleted(this);
// listener.storageCompleted(this);
// do we also need to close all of the files and reopen
// them readonly?
// Do a complete check to be sure.
// Temporarily resets the 'needed' variable and 'bitfield', then call
// checkCreateFiles() which will set 'needed' and 'bitfield'
// and also call listener.storageCompleted() if the double-check
// was successful.
// Todo: set a listener variable so the web shows "checking" and don't
// have the user panic when completed amount goes to zero temporarily?
needed = metainfo.getPieces();
bitfield = new BitField(needed);
checkCreateFiles();
if (needed > 0) {
listener.setWantedPieces(this);
Snark.debug("WARNING: Not really done, missing " + needed
+ " pieces", Snark.WARNING);
}
}
return true;
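
Working through the piece-size loop above with the constants visible in this diff: MAX_PIECES = 100*1024/20 = 5120, so a newly generated torrent gets 256 KB pieces up to about 1.25 GB of data, 512 KB pieces up to about 2.5 GB, and 1 MB pieces beyond that; because piece_size is now capped at MAX_PIECE_SIZE = 1 MB, torrents over roughly 5 GB simply exceed MAX_PIECES instead of growing their pieces further, which matches the new 1 MB piece-length limit SnarkManager enforces on accepted torrents.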

View File

@@ -55,4 +55,11 @@ public interface StorageListener
*
*/
void storageCompleted(Storage storage);
/** Reset the peer's wanted pieces table
* Call after the storage double-check fails
*
* @param peer the peer
*/
void setWantedPieces(Storage storage);
}

View File

@@ -43,6 +43,8 @@ public class TrackerClient extends I2PThread
private static final String STOPPED_EVENT = "stopped";
private final static int SLEEP = 5; // 5 minutes.
private final static int DELAY_MIN = 2000; // 2 secs.
private final static int DELAY_MUL = 1500; // 1.5 secs.
private final MetaInfo meta;
private final PeerCoordinator coordinator;
@@ -110,6 +112,7 @@ public class TrackerClient extends I2PThread
long left = coordinator.getLeft();
boolean completed = (left == 0);
int sleptTime = 0;
try
{
@@ -117,6 +120,7 @@ public class TrackerClient extends I2PThread
boolean started = false;
while (!started)
{
sleptTime = 0;
try
{
// Send start.
@@ -125,18 +129,20 @@ public class TrackerClient extends I2PThread
STARTED_EVENT);
Set peers = info.getPeers();
coordinator.trackerSeenPeers = peers.size();
coordinator.trackerProblems = null;
if (!completed) {
Iterator it = peers.iterator();
while (it.hasNext()) {
Peer cur = (Peer)it.next();
coordinator.addPeer(cur);
int delay = 3000;
int c = ((int)cur.getPeerID().getAddress().calculateHash().toBase64().charAt(0)) % 10;
try { Thread.sleep(delay * c); } catch (InterruptedException ie) {}
int delay = DELAY_MUL;
delay *= ((int)cur.getPeerID().getAddress().calculateHash().toBase64().charAt(0)) % 10;
delay += DELAY_MIN;
sleptTime += delay;
try { Thread.sleep(delay); } catch (InterruptedException ie) {}
}
}
started = true;
coordinator.trackerProblems = null;
}
catch (IOException ioe)
{
@@ -168,8 +174,17 @@ public class TrackerClient extends I2PThread
try
{
// Sleep some minutes...
int delay = SLEEP*60*1000 + r.nextInt(120*1000);
Thread.sleep(delay);
int delay;
if(coordinator.trackerProblems != null && !completed) {
delay = 60*1000;
} else if(completed) {
delay = 3*SLEEP*60*1000 + r.nextInt(120*1000);
} else {
delay = SLEEP*60*1000 + r.nextInt(120*1000);
delay -= sleptTime;
}
if (delay > 0)
Thread.sleep(delay);
}
catch(InterruptedException interrupt)
{
@@ -196,6 +211,7 @@ public class TrackerClient extends I2PThread
event = NO_EVENT;
// Only do a request when necessary.
sleptTime = 0;
if (event == COMPLETED_EVENT
|| coordinator.needPeers()
|| System.currentTimeMillis() > lastRequestTime + interval)
@@ -206,6 +222,7 @@ public class TrackerClient extends I2PThread
uploaded, downloaded, left,
event);
coordinator.trackerProblems = null;
Set peers = info.getPeers();
coordinator.trackerSeenPeers = peers.size();
if ( (left > 0) && (!completed) ) {
@@ -216,10 +233,14 @@ public class TrackerClient extends I2PThread
Iterator it = ordered.iterator();
while (it.hasNext()) {
Peer cur = (Peer)it.next();
coordinator.addPeer(cur);
int delay = 3000;
int c = ((int)cur.getPeerID().getAddress().calculateHash().toBase64().charAt(0)) % 10;
try { Thread.sleep(delay * c); } catch (InterruptedException ie) {}
// only delay if we actually make an attempt to add peer
if(coordinator.addPeer(cur)) {
int delay = DELAY_MUL;
delay *= ((int)cur.getPeerID().getAddress().calculateHash().toBase64().charAt(0)) % 10;
delay += DELAY_MIN;
sleptTime += delay;
try { Thread.sleep(delay); } catch (InterruptedException ie) {}
}
}
}
}
@@ -229,6 +250,7 @@ public class TrackerClient extends I2PThread
Snark.debug
("WARNING: Could not contact tracker at '"
+ announce + "': " + ioe, Snark.WARNING);
coordinator.trackerProblems = ioe.getMessage();
}
}
}
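
For reference on the new delay constants above: each added peer is now staggered by DELAY_MUL * (code of the first Base64 character of the peer hash % 10) + DELAY_MIN, i.e. between 2 and 15.5 seconds, and the accumulated sleptTime is subtracted from the next tracker sleep so the staggering does not stretch the announce interval. Retries now happen after 1 minute when the tracker reported a problem and the torrent is incomplete, after roughly 15 minutes (3 * SLEEP) plus up to 2 minutes of jitter when seeding, and after the usual 5 minutes plus jitter otherwise.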

View File

@@ -43,6 +43,7 @@ public class I2PSnarkServlet extends HttpServlet {
req.setCharacterEncoding("UTF-8");
resp.setCharacterEncoding("UTF-8");
resp.setContentType("text/html; charset=UTF-8");
long stats[] = {0,0,0,0};
String nonce = req.getParameter("nonce");
if ( (nonce != null) && (nonce.equals(String.valueOf(_nonce))) )
@@ -72,10 +73,18 @@ public class I2PSnarkServlet extends HttpServlet {
String uri = req.getRequestURI();
for (int i = 0; i < snarks.size(); i++) {
Snark snark = (Snark)snarks.get(i);
displaySnark(out, snark, uri, i);
displaySnark(out, snark, uri, i, stats);
}
if (snarks.size() <= 0) {
out.write(TABLE_EMPTY);
} else if (snarks.size() > 1) {
out.write(TABLE_TOTAL);
out.write(" <th align=\"right\" valign=\"top\">" + formatSize(stats[0]) + "</th>\n" +
" <th align=\"right\" valign=\"top\">" + formatSize(stats[1]) + "</th>\n" +
" <th align=\"right\" valign=\"top\">" + formatSize(stats[2]) + "ps</th>\n" +
" <th align=\"right\" valign=\"top\">" + formatSize(stats[3]) + "ps</th>\n" +
" <th>&nbsp;</th></tr>\n" +
"</tfoot>\n");
}
out.write(TABLE_FOOTER);
@@ -279,12 +288,16 @@ public class I2PSnarkServlet extends HttpServlet {
}
return rv;
}
private static final int MAX_DISPLAYED_FILENAME_LENGTH = 60;
private void displaySnark(PrintWriter out, Snark snark, String uri, int row) throws IOException {
private static final int MAX_DISPLAYED_ERROR_LENGTH = 30;
private void displaySnark(PrintWriter out, Snark snark, String uri, int row, long stats[]) throws IOException {
String filename = snark.torrent;
File f = new File(filename);
filename = f.getName(); // the torrent may be the canonical name, so lets just grab the local name
int i = filename.lastIndexOf(".torrent");
if (i > 0)
filename = filename.substring(0, i);
if (filename.length() > MAX_DISPLAYED_FILENAME_LENGTH)
filename = filename.substring(0, MAX_DISPLAYED_FILENAME_LENGTH) + "...";
long total = snark.meta.getTotalLength();
@@ -292,11 +305,22 @@ public class I2PSnarkServlet extends HttpServlet {
long remaining = (long) snark.storage.needed() * (long) snark.meta.getPieceLength(0);
if (remaining > total)
remaining = total;
int totalBps = 4096; // should probably grab this from the snark...
long remainingSeconds = remaining / totalBps;
long downBps = snark.coordinator.getDownloadRate();
long upBps = snark.coordinator.getUploadRate();
long remainingSeconds;
if (downBps > 0)
remainingSeconds = remaining / downBps;
else
remainingSeconds = -1;
long uploaded = snark.coordinator.getUploaded();
boolean isRunning = !snark.stopped;
stats[0] += snark.coordinator.getDownloaded();
stats[1] += uploaded;
if (isRunning) {
stats[2] += downBps;
stats[3] += upBps;
}
boolean isValid = snark.meta != null;
boolean singleFile = snark.meta.getFiles() == null;
@@ -308,16 +332,23 @@ public class I2PSnarkServlet extends HttpServlet {
if (err != null) {
if (isRunning)
statusString = "TrackerErr (" + curPeers + "/" + knownPeers + " peers)";
else
else {
if (err.length() > MAX_DISPLAYED_ERROR_LENGTH)
err = err.substring(0, MAX_DISPLAYED_ERROR_LENGTH) + "...";
statusString = "TrackerErr (" + err + ")";
}
} else if (remaining <= 0) {
if (isRunning)
statusString = "Seeding (" + curPeers + "/" + knownPeers + " peers)";
else
statusString = "Complete";
} else {
if (isRunning)
if (isRunning && curPeers > 0 && downBps > 0)
statusString = "OK (" + curPeers + "/" + knownPeers + " peers)";
else if (isRunning && curPeers > 0)
statusString = "Stalled (" + curPeers + "/" + knownPeers + " peers)";
else if (isRunning)
statusString = "No Peers (0/" + knownPeers + ")";
else
statusString = "Stopped";
}
@@ -336,20 +367,26 @@ public class I2PSnarkServlet extends HttpServlet {
out.write("</a>");
out.write("</td>\n\t");
out.write("<td valign=\"top\" align=\"left\" class=\"snarkTorrentDownloaded " + rowClass + "\">");
if (remaining > 0) {
out.write(formatSize(total-remaining) + "/" + formatSize(total)); // 18MB/3GB
// lets hold off on the ETA until we have rates sorted...
//out.write(" (eta " + DataHelper.formatDuration(remainingSeconds*1000) + ")"); // (eta 6h)
} else {
out.write(formatSize(total)); // 3GB
}
out.write("<td valign=\"top\" align=\"right\" class=\"snarkTorrentETA " + rowClass + "\">");
if(isRunning && remainingSeconds > 0)
out.write(DataHelper.formatDuration(remainingSeconds*1000)); // (eta 6h)
out.write("</td>\n\t");
out.write("<td valign=\"top\" align=\"left\" class=\"snarkTorrentUploaded " + rowClass
out.write("<td valign=\"top\" align=\"right\" class=\"snarkTorrentDownloaded " + rowClass + "\">");
if (remaining > 0)
out.write(formatSize(total-remaining) + "/" + formatSize(total)); // 18MB/3GB
else
out.write(formatSize(total)); // 3GB
out.write("</td>\n\t");
out.write("<td valign=\"top\" align=\"right\" class=\"snarkTorrentUploaded " + rowClass
+ "\">" + formatSize(uploaded) + "</td>\n\t");
//out.write("<td valign=\"top\" align=\"left\" class=\"snarkTorrentRate\">");
//out.write("n/a"); //2KBps/12KBps/4KBps
//out.write("</td>\n\t");
out.write("<td valign=\"top\" align=\"right\" class=\"snarkTorrentRate\">");
if(isRunning && remaining > 0)
out.write(formatSize(downBps) + "ps");
out.write("</td>\n\t");
out.write("<td valign=\"top\" align=\"right\" class=\"snarkTorrentRate\">");
if(isRunning)
out.write(formatSize(upBps) + "ps");
out.write("</td>\n\t");
out.write("<td valign=\"top\" align=\"left\" class=\"snarkTorrentAction " + rowClass + "\">");
if (isRunning) {
out.write("<a href=\"" + uri + "?action=Stop&nonce=" + _nonce
@@ -540,7 +577,10 @@ public class I2PSnarkServlet extends HttpServlet {
"}\n" +
"th {\n" +
" background-color: #C7D5D5;\n" +
" margin: 0px 0px 0px 0px;\n" +
" padding: 0px 7px 0px 3px;\n" +
"}\n" +
"td {\n" +
" padding: 0px 7px 0px 3px;\n" +
"}\n" +
".snarkTorrentEven {\n" +
" background-color: #E7E7E7;\n" +
@@ -568,19 +608,26 @@ public class I2PSnarkServlet extends HttpServlet {
"<body>\n";
private static final String TABLE_HEADER = "<table border=\"0\" class=\"snarkTorrents\" width=\"100%\">\n" +
private static final String TABLE_HEADER = "<table border=\"0\" class=\"snarkTorrents\" width=\"100%\" cellpadding=\"0 10px\">\n" +
"<thead>\n" +
"<tr><th align=\"left\" valign=\"top\">Status</th>\n" +
" <th align=\"left\" valign=\"top\">Torrent</th>\n" +
" <th align=\"left\" valign=\"top\">Downloaded</th>\n" +
" <th align=\"left\" valign=\"top\">Uploaded</th>\n" +
//" <th align=\"left\" valign=\"top\">Rate</th>\n" +
" <th align=\"right\" valign=\"top\">ETA</th>\n" +
" <th align=\"right\" valign=\"top\">Downloaded</th>\n" +
" <th align=\"right\" valign=\"top\">Uploaded</th>\n" +
" <th align=\"right\" valign=\"top\">Down Rate</th>\n" +
" <th align=\"right\" valign=\"top\">Up Rate</th>\n" +
" <th>&nbsp;</th></tr>\n" +
"</thead>\n";
private static final String TABLE_TOTAL = "<tfoot>\n" +
"<tr><th align=\"left\" valign=\"top\">Totals</th>\n" +
" <th>&nbsp;</th>\n" +
" <th>&nbsp;</th>\n";
private static final String TABLE_EMPTY = "<tr class=\"snarkTorrentEven\">" +
"<td class=\"snarkTorrentEven\" align=\"left\"" +
" valign=\"top\" colspan=\"5\">No torrents</td></tr>\n";
" valign=\"top\" colspan=\"8\">No torrents</td></tr>\n";
private static final String TABLE_FOOTER = "</table>\n";
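
With real rates available, the new ETA column above is simply remaining / downBps; for example (assumed numbers), 90 MB still needed at 5000 Bps works out to about 18,900 seconds, displayed as roughly 5 hours, and no ETA is shown while the download rate is zero.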

View File

@@ -50,8 +50,16 @@ public class ConfigNetHelper {
}
public final static String PROP_I2NP_NTCP_HOSTNAME = "i2np.ntcp.hostname";
public final static String PROP_I2NP_NTCP_PORT = "i2np.ntcp.port";
public String getNtcphostname() { return _context.getProperty(PROP_I2NP_NTCP_HOSTNAME); }
public String getNtcpport() { return _context.getProperty(PROP_I2NP_NTCP_PORT); }
public String getNtcphostname() {
String hostname = _context.getProperty(PROP_I2NP_NTCP_HOSTNAME);
if (hostname == null) return "";
return hostname;
}
public String getNtcpport() {
String port = _context.getProperty(PROP_I2NP_NTCP_PORT);
if (port == null) return "";
return port;
}
public String getUdpAddress() {
RouterAddress addr = _context.router().getRouterInfo().getTargetAddress("SSU");

View File

@@ -13,8 +13,8 @@
<b>Ident:</b> <jsp:getProperty name="helper" property="ident" /><br />
<b>Version:</b> <jsp:getProperty name="helper" property="version" /><br />
<b>Uptime:</b> <jsp:getProperty name="helper" property="uptime" /><br />
<b>Now:</b> <jsp:getProperty name="helper" property="time" /><br />
<b>Status:</b> <a href="config.jsp"><jsp:getProperty name="helper" property="reachability" /></a><%
<b>Now:</b> <jsp:getProperty name="helper" property="time" /><!--<br />
<b>Status:</b> <a href="config.jsp"><jsp:getProperty name="helper" property="reachability" /></a>--><%
if (helper.updateAvailable()) {
if ("true".equals(System.getProperty("net.i2p.router.web.UpdateHandler.updateInProgress", "false"))) {
out.print("<br />" + update.getStatus());

View File

@@ -19,7 +19,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* $Revision: 1.7 $
* $Revision: 1.1 $
*/
package i2p.susi.dns;
@@ -99,7 +99,6 @@ public class AddressbookBean
return ConfigBean.addressbookPrefix + filename;
}
private Object[] entries;
public Object[] getEntries()
{
return entries;
@@ -130,10 +129,59 @@ public class AddressbookBean
public void setSerial(String serial) {
this.serial = serial;
}
/** Load addressbook and apply filter, returning messages about this. */
public String getLoadBookMessages()
{
// Config and addressbook now loaded here, hence not needed in getMessages()
loadConfig();
addressbook = new Properties();
String message = "";
try {
addressbook.load( new FileInputStream( getFileName() ) );
LinkedList list = new LinkedList();
Enumeration e = addressbook.keys();
while( e.hasMoreElements() ) {
String name = (String)e.nextElement();
String destination = addressbook.getProperty( name );
if( filter != null && filter.length() > 0 ) {
if( filter.compareTo( "0-9" ) == 0 ) {
char first = name.charAt(0);
if( first < '0' || first > '9' )
continue;
}
else if( ! name.toLowerCase().startsWith( filter.toLowerCase() ) ) {
continue;
}
}
if( search != null && search.length() > 0 ) {
if( name.indexOf( search ) == -1 ) {
continue;
}
}
list.addLast( new AddressBean( name, destination ) );
}
// Format a message about filtered addressbook size, and the number of displayed entries
message = "Filtered list contains " + list.size() + " entries";
if (list.size() > 300) message += ", displaying the first 300."; else message += ".";
Object array[] = list.toArray();
Arrays.sort( array, sorter );
entries = array;
}
catch (Exception e) {
Debug.debug( e.getClass().getName() + ": " + e.getMessage() );
}
if( message.length() > 0 )
message = "<p>" + message + "</p>";
return message;
}
/** Perform actions, returning messages about this. */
public String getMessages()
{
loadConfig();
// Loading config and addressbook moved into getLoadBookMessages()
String message = "";
if( action != null ) {
@@ -175,42 +223,7 @@ public class AddressbookBean
}
action = null;
addressbook = new Properties();
try {
addressbook.load( new FileInputStream( getFileName() ) );
LinkedList list = new LinkedList();
Enumeration e = addressbook.keys();
while( e.hasMoreElements() ) {
String name = (String)e.nextElement();
String destination = addressbook.getProperty( name );
if( filter != null && filter.length() > 0 ) {
if( filter.compareTo( "0-9" ) == 0 ) {
char first = name.charAt(0);
if( first < '0' || first > '9' )
continue;
}
else if( ! name.toLowerCase().startsWith( filter.toLowerCase() ) ) {
continue;
}
}
if( search != null && search.length() > 0 ) {
if( name.indexOf( search ) == -1 ) {
continue;
}
}
list.addLast( new AddressBean( name, destination ) );
}
Object array[] = list.toArray();
Arrays.sort( array, sorter );
entries = array;
}
catch (Exception e) {
Debug.debug( e.getClass().getName() + ": " + e.getMessage() );
}
if( message.length() > 0 )
message = "<p class=\"messages\">" + message + "</p>";
return message;
@@ -234,6 +247,10 @@ public class AddressbookBean
{
return getBook().compareToIgnoreCase( "router" ) == 0;
}
public boolean isPublished()
{
return getBook().compareToIgnoreCase( "published" ) == 0;
}
public void setFilter(String filter) {
if( filter != null && ( filter.length() == 0 || filter.compareToIgnoreCase( "none" ) == 0 ) ) {
filter = null;

View File

@@ -20,7 +20,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* $Revision: 1.1 $
* $Revision: 1.2 $
*/
%>
<%@ page contentType="text/html"%>
@@ -60,6 +60,8 @@
<div id="messages">${book.messages}</div>
<span>${book.loadBookMessages}</span>
<div id="filter">
<p>Filter: <a href="addressbook.jsp?filter=a">a</a>
<a href="addressbook.jsp?filter=b">b</a>
@@ -115,16 +117,17 @@
<table class="book" cellspacing="0" cellpadding="5">
<tr class="head">
<c:if test="${book.master || book.router}">
<c:if test="${book.master || book.router || book.published}">
<th>&nbsp;</th>
</c:if>
<th>Name</th>
<th>Destination</th>
</tr>
<c:forEach items="${book.entries}" var="addr">
<!-- limit iterator to 300, or "Form too large" may result on submit -->
<c:forEach items="${book.entries}" var="addr" begin="0" end="299">
<tr class="list${book.trClass}">
<c:if test="${book.master || book.router}">
<c:if test="${book.master || book.router || book.published}">
<td class="checkbox"><input type="checkbox" name="checked" value="${addr.name}" alt="Mark for deletion"></td>
</c:if>
<td class="names"><a href="http://${addr.name}/">${addr.name}</a> -
@@ -136,7 +139,7 @@
</table>
</div>
<c:if test="${book.master || book.router}">
<c:if test="${book.master || book.router || book.published}">
<div id="buttons">
<p class="buttons"><input type="image" name="action" value="delete" src="images/delete.png" alt="Delete checked" />
</p>

View File

@@ -32,7 +32,7 @@ public class CPUID {
* initialization? this would otherwise use the Log component, but this makes
* it easier for other systems to reuse this class
*/
private static final boolean _doLog = true;
private static final boolean _doLog = System.getProperty("jcpuid.dontLog") == null;
//.matches() is a java 1.4+ addition, using a simplified version for 1.3+
//private static final boolean isX86 = System.getProperty("os.arch").toLowerCase().matches("i?[x0-9]86(_64)?");
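
Since the check above only tests for the property's presence, starting the JVM with -Djcpuid.dontLog (any value, or no value at all) is enough to suppress the jcpuid startup messages; per the 2006-09-27 changelog entry, jbigi.dontLog does the same for the jbigi loader, whose diff is not shown here.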

View File

@@ -52,8 +52,8 @@ public class AsyncFortunaStandalone extends FortunaStandalone implements Runnabl
long before = System.currentTimeMillis();
long waited = 0;
while (status[nextBuf] != STATUS_FILLED) {
System.out.println(Thread.currentThread().getName() + ": Next PRNG buffer "
+ nextBuf + " isn't ready (" + status[nextBuf] + ")");
//System.out.println(Thread.currentThread().getName() + ": Next PRNG buffer "
// + nextBuf + " isn't ready (" + status[nextBuf] + ")");
//new Exception("source").printStackTrace();
asyncBuffers.notifyAll();
try {
@@ -61,7 +61,7 @@ public class AsyncFortunaStandalone extends FortunaStandalone implements Runnabl
} catch (InterruptedException ie) {}
waited = System.currentTimeMillis()-before;
}
if (waited > 0)
if (waited > 10*1000)
System.out.println(Thread.currentThread().getName() + ": Took " + waited
+ "ms for a full PRNG buffer to be found");
//System.out.println(Thread.currentThread().getName() + ": Switching to prng buffer " + nextBuf);

View File

@@ -14,8 +14,8 @@ package net.i2p;
*
*/
public class CoreVersion {
public final static String ID = "$Revision: 1.66 $ $Date: 2006-07-27 22:35:02 $";
public final static String VERSION = "0.6.1.24";
public final static String ID = "$Revision: 1.8 $ $Date: 2006-09-09 12:46:25 $";
public final static String VERSION = "0.6.1.26";
public static void main(String args[]) {
System.out.println("I2P Core version: " + VERSION);

View File

@@ -14,6 +14,7 @@ import net.i2p.crypto.DummyElGamalEngine;
import net.i2p.crypto.DummyPooledRandomSource;
import net.i2p.crypto.ElGamalAESEngine;
import net.i2p.crypto.ElGamalEngine;
import net.i2p.crypto.HMAC256Generator;
import net.i2p.crypto.HMACGenerator;
import net.i2p.crypto.KeyGenerator;
import net.i2p.crypto.PersistentSessionKeyManager;
@@ -67,8 +68,9 @@ public class I2PAppContext {
private AESEngine _AESEngine;
private LogManager _logManager;
private HMACGenerator _hmac;
private HMAC256Generator _hmac256;
private SHA256Generator _sha;
private Clock _clock;
protected Clock _clock; // overridden in RouterContext
private DSAEngine _dsa;
private RoutingKeyGenerator _routingKeyGenerator;
private RandomSource _random;
@@ -82,8 +84,9 @@ public class I2PAppContext {
private volatile boolean _AESEngineInitialized;
private volatile boolean _logManagerInitialized;
private volatile boolean _hmacInitialized;
private volatile boolean _hmac256Initialized;
private volatile boolean _shaInitialized;
private volatile boolean _clockInitialized;
protected volatile boolean _clockInitialized; // used in RouterContext
private volatile boolean _dsaInitialized;
private volatile boolean _routingKeyGeneratorInitialized;
private volatile boolean _randomInitialized;
@@ -353,6 +356,19 @@ public class I2PAppContext {
_hmacInitialized = true;
}
}
public HMAC256Generator hmac256() {
if (!_hmac256Initialized) initializeHMAC256();
return _hmac256;
}
private void initializeHMAC256() {
synchronized (this) {
if (_hmac256 == null) {
_hmac256 = new HMAC256Generator(this);
}
_hmac256Initialized = true;
}
}
/**
* Our SHA256 instance (see the hmac discussion for why its context specific)
@@ -411,11 +427,11 @@ public class I2PAppContext {
* enable simulators to play with clock skew among different instances.
*
*/
public Clock clock() {
public Clock clock() { // overridden in RouterContext
if (!_clockInitialized) initializeClock();
return _clock;
}
private void initializeClock() {
protected void initializeClock() { // overridden in RouterContext
synchronized (this) {
if (_clock == null)
_clock = new Clock(this);

View File

@@ -0,0 +1,51 @@
package net.i2p.crypto;
import gnu.crypto.hash.Sha256Standalone;
import net.i2p.I2PAppContext;
import net.i2p.data.Base64;
import net.i2p.data.Hash;
import net.i2p.data.SessionKey;
import org.bouncycastle.crypto.Digest;
import org.bouncycastle.crypto.macs.HMac;
/**
* Calculate the HMAC-SHA256 of a key+message. All the good stuff occurs
* in {@link org.bouncycastle.crypto.macs.HMac} and
* {@link org.bouncycastle.crypto.digests.MD5Digest}.
*
*/
public class HMAC256Generator extends HMACGenerator {
public HMAC256Generator(I2PAppContext context) { super(context); }
protected HMac acquire() {
synchronized (_available) {
if (_available.size() > 0)
return (HMac)_available.remove(0);
}
// the HMAC is hardcoded to use SHA256 digest size
// for backwards compatibility. next time we have a backwards
// incompatible change, we should update this by removing ", 32"
return new HMac(new Sha256ForMAC());
}
private class Sha256ForMAC extends Sha256Standalone implements Digest {
public String getAlgorithmName() { return "sha256 for hmac"; }
public int getDigestSize() { return 32; }
public int doFinal(byte[] out, int outOff) {
byte rv[] = digest();
System.arraycopy(rv, 0, out, outOff, rv.length);
reset();
return rv.length;
}
}
public static void main(String args[]) {
I2PAppContext ctx = I2PAppContext.getGlobalContext();
byte data[] = new byte[64];
ctx.random().nextBytes(data);
SessionKey key = ctx.keyGenerator().generateSessionKey();
Hash mac = ctx.hmac256().calculate(key, data);
System.out.println(Base64.encode(mac.getData()));
}
}

View File

@@ -20,7 +20,7 @@ import org.bouncycastle.crypto.macs.HMac;
public class HMACGenerator {
private I2PAppContext _context;
/** set of available HMAC instances for calculate */
private List _available;
protected List _available;
/** set of available byte[] buffers for verify */
private List _availableTmp;
@@ -85,7 +85,7 @@ public class HMACGenerator {
return eq;
}
private HMac acquire() {
protected HMac acquire() {
synchronized (_available) {
if (_available.size() > 0)
return (HMac)_available.remove(0);

View File

@@ -9,10 +9,13 @@ package net.i2p.crypto;
*
*/
import gnu.crypto.hash.Sha256Standalone;
import java.math.BigInteger;
import net.i2p.I2PAppContext;
import net.i2p.data.Base64;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.data.PrivateKey;
import net.i2p.data.PublicKey;
import net.i2p.data.SessionKey;
@@ -53,6 +56,18 @@ public class KeyGenerator {
return key;
}
private static final int PBE_ROUNDS = 1000;
/** PBE the passphrase with the salt */
public SessionKey generateSessionKey(byte salt[], byte passphrase[]) {
byte salted[] = new byte[16+passphrase.length];
System.arraycopy(salt, 0, salted, 0, Math.min(salt.length, 16));
System.arraycopy(passphrase, 0, salted, 16, passphrase.length);
byte h[] = _context.sha().calculateHash(salted).getData();
for (int i = 1; i < PBE_ROUNDS; i++)
_context.sha().calculateHash(h, 0, Hash.HASH_LENGTH, h, 0);
return new SessionKey(h);
}
/** standard exponent size */
private static final int PUBKEY_EXPONENT_SIZE_FULL = 2048;
/**
@@ -95,7 +110,7 @@ public class KeyGenerator {
* @return the corresponding PublicKey object
*/
public static PublicKey getPublicKey(PrivateKey priv) {
BigInteger a = new NativeBigInteger(priv.toByteArray());
BigInteger a = new NativeBigInteger(1, priv.toByteArray());
BigInteger aalpha = CryptoConstants.elgg.modPow(a, CryptoConstants.elgp);
PublicKey pub = new PublicKey();
byte [] pubBytes = aalpha.toByteArray();
@@ -132,7 +147,7 @@ public class KeyGenerator {
* @return a SigningPublicKey object
*/
public static SigningPublicKey getSigningPublicKey(SigningPrivateKey priv) {
BigInteger x = new NativeBigInteger(priv.toByteArray());
BigInteger x = new NativeBigInteger(1, priv.toByteArray());
BigInteger y = CryptoConstants.dsag.modPow(x, CryptoConstants.dsap);
SigningPublicKey pub = new SigningPublicKey();
byte [] pubBytes = padBuffer(y.toByteArray(), SigningPublicKey.KEYSIZE_BYTES);
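
A brief usage sketch of the new password-based key derivation above (the context accessors are the ones already used elsewhere in this changeset; the passphrase and salt handling are illustrative):

    import net.i2p.I2PAppContext;
    import net.i2p.data.Base64;
    import net.i2p.data.SessionKey;

    public class PbeSketch {
        public static void main(String[] args) {
            I2PAppContext ctx = I2PAppContext.getGlobalContext();
            byte[] salt = new byte[16];
            ctx.random().nextBytes(salt);            // random 16-byte salt, kept alongside the ciphertext
            byte[] passphrase = "some passphrase".getBytes();
            // 1000 rounds of SHA256 over salt+passphrase, per the new generateSessionKey() above
            SessionKey key = ctx.keyGenerator().generateSessionKey(salt, passphrase);
            System.out.println(Base64.encode(key.getData()));
        }
    }
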

View File

@@ -9,6 +9,7 @@ package net.i2p.data;
*
*/
import gnu.crypto.hash.Sha256Standalone;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.ByteArrayInputStream;
@@ -770,9 +771,11 @@ public class DataHelper {
* Read a newline delimited line from the stream, returning the line (without
* the newline), or null if EOF reached before the newline was found
*/
public static String readLine(InputStream in) throws IOException {
public static String readLine(InputStream in) throws IOException { return readLine(in, (Sha256Standalone)null); }
/** update the hash along the way */
public static String readLine(InputStream in, Sha256Standalone hash) throws IOException {
StringBuffer buf = new StringBuffer(128);
boolean ok = readLine(in, buf);
boolean ok = readLine(in, buf, hash);
if (ok)
return buf.toString();
else
@@ -785,8 +788,13 @@ public class DataHelper {
* newline was found
*/
public static boolean readLine(InputStream in, StringBuffer buf) throws IOException {
return readLine(in, buf, null);
}
/** update the hash along the way */
public static boolean readLine(InputStream in, StringBuffer buf, Sha256Standalone hash) throws IOException {
int c = -1;
while ( (c = in.read()) != -1) {
if (hash != null) hash.update((byte)c);
if (c == '\n')
break;
buf.append((char)c);
@@ -797,6 +805,10 @@ public class DataHelper {
return true;
}
public static void write(OutputStream out, byte data[], Sha256Standalone hash) throws IOException {
hash.update(data);
out.write(data);
}
public static List sortStructures(Collection dataStructures) {
if (dataStructures == null) return new ArrayList();
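
A brief usage sketch of the new hashing readLine() overload above; Sha256Standalone is the bundled GNU Crypto SHA-256 already used elsewhere in this changeset, and its digest() accessor is assumed here:

    import gnu.crypto.hash.Sha256Standalone;
    import java.io.IOException;
    import java.io.InputStream;
    import net.i2p.data.DataHelper;

    public class ReadLineHashSketch {
        /** Read every line from the stream and return the SHA-256 of the raw bytes consumed. */
        public static byte[] readAndHash(InputStream in) throws IOException {
            Sha256Standalone hash = new Sha256Standalone();
            while (DataHelper.readLine(in, hash) != null) {
                // every byte read, including each newline, has already been folded into the hash
            }
            return hash.digest();
        }
    }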

View File

@@ -32,6 +32,7 @@ public class PrivateKey extends DataStructureImpl {
public PrivateKey() {
setData(null);
}
public PrivateKey(byte data[]) { setData(data); }
/** constructs from base64
* @param base64Data a string of base64 data (the output of .toBase64() called

View File

@@ -31,6 +31,11 @@ public class PublicKey extends DataStructureImpl {
public PublicKey() {
setData(null);
}
public PublicKey(byte data[]) {
if ( (data == null) || (data.length != KEYSIZE_BYTES) )
throw new IllegalArgumentException("Data must be specified, and the correct size");
setData(data);
}
/** constructs from base64
* @param base64Data a string of base64 data (the output of .toBase64() called

View File

@@ -86,7 +86,7 @@ public class RouterIdentity extends DataStructureImpl {
public void writeBytes(OutputStream out) throws DataFormatException, IOException {
if ((_certificate == null) || (_publicKey == null) || (_signingKey == null))
throw new DataFormatException("Not enough data to format the destination");
throw new DataFormatException("Not enough data to format the router identity");
_publicKey.writeBytes(out);
_signingKey.writeBytes(out);
_certificate.writeBytes(out);

View File

@@ -112,11 +112,19 @@ public class NtpClient {
// Process response
NtpMessage msg = new NtpMessage(packet.getData());
double roundTripDelay = (destinationTimestamp-msg.originateTimestamp) -
(msg.receiveTimestamp-msg.transmitTimestamp);
double localClockOffset = ((msg.receiveTimestamp - msg.originateTimestamp) +
(msg.transmitTimestamp - destinationTimestamp)) / 2;
socket.close();
// Stratum must be between 1 (atomic) and 15 (maximum defined value)
// Anything else is right out, treat such responses like errors
if ((msg.stratum < 1) || (msg.stratum > 15)) {
//System.out.println("Response from NTP server of unacceptable stratum " + msg.stratum + ", failing.");
return(-1);
}
long rv = (long)(System.currentTimeMillis() + localClockOffset*1000);
//System.out.println("host: " + address.getHostAddress() + " rtt: " + roundTripDelay + " offset: " + localClockOffset + " seconds");

View File

@@ -13,12 +13,16 @@ import net.i2p.time.Timestamper;
* between the local computer's current time and the time as known by some reference
* (such as an NTP synchronized clock).
*
* Protected members are used in the subclass RouterClock,
* which has access to a router's transports (particularly peer clock skews)
* to second-guess the sanity of clock adjustments.
*
*/
public class Clock implements Timestamper.UpdateListener {
private I2PAppContext _context;
protected I2PAppContext _context;
private Timestamper _timestamper;
private long _startedOn;
private boolean _statCreated;
protected long _startedOn;
protected boolean _statCreated;
public Clock(I2PAppContext context) {
_context = context;
@@ -36,10 +40,10 @@ public class Clock implements Timestamper.UpdateListener {
public Timestamper getTimestamper() { return _timestamper; }
/** we fetch it on demand to avoid circular dependencies (logging uses the clock) */
private Log getLog() { return _context.logManager().getLog(Clock.class); }
protected Log getLog() { return _context.logManager().getLog(Clock.class); }
private volatile long _offset;
private boolean _alreadyChanged;
protected volatile long _offset;
protected boolean _alreadyChanged;
private Set _listeners;
/** if the clock is skewed by 3+ days, fuck 'em */
@@ -132,7 +136,7 @@ public class Clock implements Timestamper.UpdateListener {
}
}
private void fireOffsetChanged(long delta) {
protected void fireOffsetChanged(long delta) {
synchronized (_listeners) {
for (Iterator iter = _listeners.iterator(); iter.hasNext();) {
ClockUpdateListener lsnr = (ClockUpdateListener) iter.next();

View File

@@ -13,6 +13,7 @@ import net.i2p.util.Log;
public class EepPost {
private I2PAppContext _context;
private Log _log;
private static final String CRLF = "\r\n";
public EepPost() {
this(I2PAppContext.getGlobalContext());
@@ -65,6 +66,7 @@ public class EepPost {
_onCompletion = onCompletion;
}
public void run() {
if (_log.shouldLog(Log.DEBUG)) _log.debug("Running the post task");
Socket s = null;
try {
URL u = new URL(_url);
@@ -81,17 +83,20 @@ public class EepPost {
_proxyPort = p;
}
if (_log.shouldLog(Log.DEBUG)) _log.debug("Connecting to the server/proxy...");
s = new Socket(_proxyHost, _proxyPort);
if (_log.shouldLog(Log.DEBUG)) _log.debug("Connected");
OutputStream out = s.getOutputStream();
String sep = getSeparator();
long length = calcContentLength(sep, _fields);
if (_log.shouldLog(Log.DEBUG)) _log.debug("Separator: " + sep + " content length: " + length);
String header = getHeader(isProxy, path, h, p, sep, length);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Header: \n" + header);
out.write(header.getBytes());
out.flush();
if (false) {
out.write(("--" + sep + "\ncontent-disposition: form-data; name=\"field1\"\n\nStuff goes here\n--" + sep + "--\n").getBytes());
out.write(("--" + sep + CRLF + "content-disposition: form-data; name=\"field1\"" + CRLF + CRLF + "Stuff goes here" + CRLF + "--" + sep + "--" + CRLF).getBytes());
} else {
sendFields(out, sep, _fields);
}
@@ -121,18 +126,18 @@ public class EepPost {
Object val = fields.get(key);
if (val instanceof File) {
File f = (File)val;
len += ("--" + sep + "\nContent-Disposition: form-data; name=\"" + key + "\"; filename=\"" + f.getName() + "\"\n").length();
len += ("--" + sep + CRLF + "Content-Disposition: form-data; name=\"" + key + "\"; filename=\"" + f.getName() + "\"" + CRLF).length();
//len += ("Content-length: " + f.length() + "\n").length();
len += ("Content-Type: application/octet-stream\n\n").length();
len += ("Content-Type: application/octet-stream" + CRLF + CRLF).length();
len += f.length();
len += 1; // nl
len += CRLF.length(); // nl
} else {
len += ("--" + sep + "\nContent-Disposition: form-data; name=\"" + key + "\"\n\n").length();
len += ("--" + sep + CRLF + "Content-Disposition: form-data; name=\"" + key + "\"" + CRLF + CRLF).length();
len += val.toString().length();
len += 1; // nl
len += CRLF.length(); // nl
}
}
len += 2 + sep.length() + 2;
len += 2 + sep.length() + 2 + CRLF.length(); //2 + sep.length() + 2;
//len += 2;
return len;
}
@@ -145,29 +150,29 @@ public class EepPost {
else
sendField(out, separator, field, val.toString());
}
out.write(("--" + separator + "--\n").getBytes());
out.write(("--" + separator + "--" + CRLF).getBytes());
}
private void sendFile(OutputStream out, String separator, String field, File file) throws IOException {
long len = file.length();
out.write(("--" + separator + "\n").getBytes());
out.write(("Content-Disposition: form-data; name=\"" + field + "\"; filename=\"" + file.getName() + "\"\n").getBytes());
out.write(("--" + separator + CRLF).getBytes());
out.write(("Content-Disposition: form-data; name=\"" + field + "\"; filename=\"" + file.getName() + "\"" + CRLF).getBytes());
//out.write(("Content-length: " + len + "\n").getBytes());
out.write(("Content-Type: application/octet-stream\n\n").getBytes());
out.write(("Content-Type: application/octet-stream" + CRLF + CRLF).getBytes());
FileInputStream in = new FileInputStream(file);
byte buf[] = new byte[1024];
int read = -1;
while ( (read = in.read(buf)) != -1)
out.write(buf, 0, read);
out.write("\n".getBytes());
out.write(CRLF.getBytes());
in.close();
}
private void sendField(OutputStream out, String separator, String field, String val) throws IOException {
out.write(("--" + separator + "\n").getBytes());
out.write(("Content-Disposition: form-data; name=\"" + field + "\"\n\n").getBytes());
out.write(("--" + separator + CRLF).getBytes());
out.write(("Content-Disposition: form-data; name=\"" + field + "\"" + CRLF + CRLF).getBytes());
out.write(val.getBytes());
out.write("\n".getBytes());
out.write(CRLF.getBytes());
}
private String getHeader(boolean isProxy, String path, String host, int port, String separator, long length) {
@@ -179,16 +184,16 @@ public class EepPost {
buf.append(":").append(port);
}
buf.append(path);
buf.append(" HTTP/1.1\n");
buf.append(" HTTP/1.1" + CRLF);
buf.append("Host: ").append(host);
if (port != 80)
buf.append(":").append(port);
buf.append("\n");
buf.append("Connection: close\n");
buf.append("Content-length: ").append(length).append("\n");
buf.append(CRLF);
buf.append("Connection: close" + CRLF);
buf.append("Content-length: ").append(length).append(CRLF);
buf.append("Content-type: multipart/form-data, boundary=").append(separator);
buf.append("\n");
buf.append("\n");
buf.append(CRLF);
buf.append(CRLF);
return buf.toString();
}
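
The net effect of the CRLF constant is that every request-header line, boundary line, and field terminator now ends in \r\n as HTTP and multipart/form-data expect, rather than a bare \n. Purely as an illustration (the boundary string below is made up), one field of the generated body now has this shape:

public class MultipartShape {
    public static void main(String[] args) {
        final String CRLF = "\r\n";
        final String sep = "---------------------------42";       // made-up boundary
        String body =
              "--" + sep + CRLF
            + "Content-Disposition: form-data; name=\"field1\"" + CRLF
            + CRLF
            + "Stuff goes here" + CRLF
            + "--" + sep + "--" + CRLF;
        System.out.print(body.replace(CRLF, "\\r\\n\n"));          // print with visible terminators
    }
}
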

View File

@@ -91,7 +91,7 @@ public class NativeBigInteger extends BigInteger {
* initialization? this would otherwise use the Log component, but this makes
* it easier for other systems to reuse this class
*/
private static final boolean _doLog = true;
private static final boolean _doLog = System.getProperty("jbigi.dontLog") == null;
private final static String JBIGI_OPTIMIZATION_K6 = "k6";
private final static String JBIGI_OPTIMIZATION_K6_2 = "k62";
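
Since _doLog is read once into a static final field, the property only silences the messages if it is set (to any value) before NativeBigInteger is first loaded, e.g. via -Djbigi.dontLog on the command line. A hypothetical startup fragment doing the same from code; the class name and startup flow here are invented:

public class QuietNativeStartup {
    public static void main(String[] args) {
        // Any non-null value works, because the check is simply getProperty(...) == null.
        System.setProperty("jbigi.dontLog", "true");
        System.setProperty("jcpuid.dontLog", "true");   // per the changelog, jcpuid honors the same pattern
        // ... then proceed with whatever startup eventually loads NativeBigInteger
    }
}
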

View File

@@ -1,4 +1,152 @@
$Id: history.txt,v 1.502 2006-07-28 23:43:04 jrandom Exp $
$Id: history.txt,v 1.531 2006-10-08 17:52:59 complication Exp $
* 2006-10-10 0.6.1.26 released
2006-10-10 jrandom
* Removed the status display from the console, as its more confusing
than informative (though the content is still displayed in the HTML)
2006-10-08 Complication
* Add a framed average peer clock skew calculator
* Add config property "router.clockOffsetSanityCheck" to determine
if NTP-suggested clock offsets get sanity checked (default "true")
* Reject NTP-suggested clock offsets if they'd increase peer clock skew
by more than 5 seconds, or make it more than 20 seconds total
* Decrease log level in getMedianPeerClockSkew()
2006-09-29 zzz
* i2psnark: Second try at synchronization fix - synch addRequest()
completely rather than just portions of it and requestNextPiece()
2006-09-27 jrandom
* added HMAC-SHA256
* properly use CRLF with EepPost
* suppress jbigi/jcpuid messages if jbigi.dontLog/jcpuid.dontLog is set
* PBE session key generation (with 1000 rounds of SHA256)
* misc SDK helper functions
2006-09-26 Complication
* Take back another inadverent logging change in NTCPConnection
2006-09-26 Complication
* Take back an accidental log level change
2006-09-26 Complication
* Subclass from Clock a RouterClock which can access router transports,
with the goal of developing it to second-guess NTP results
* Make transports report clock skew in seconds
* Adjust renderStatusHTML() methods accordingly
* Show average for NTCP clock skews too
* Give transports a getClockSkews() method to report clock skews
* Give transport manager a getClockSkews() method to aggregate results
* Give comm system facade a getMedianPeerClockSkew() method which RouterClock calls
(to observe results, add "net.i2p.router.transport.CommSystemFacadeImpl=WARN" to logging)
* Extra explicitness in NTCP classes to denote unit of time.
* Fix some places in NTCPConnection where milliseconds and seconds were confused
2006-09-25 zzz
* i2psnark: Paranoid copy before writing pieces,
recheck files on completion, redownload bad pieces
* i2psnark: Don't contact tracker as often when seeding
2006-09-24 zzz
* i2psnark: Add some synchronization to prevent rare problem
after restoring orphan piece
2006-09-20 zzz
* i2psnark: Eliminate duplicate requests caused by i2p-bt's
rapid choke/unchokes
* i2psnark: Truncate long TrackerErr messages on web page
2006-09-16 zzz
* i2psnark: Implement retransmission of requests. This
eliminates one cause of complete stalls with a peer.
This problem is common on torrents with a small number of
active peers where there are no choke/unchokes to kickstart things.
2006-09-13 zzz
* i2psnark: Fix restoral of partial pieces broken by last patch
2006-09-13 zzz
* i2psnark: Mark a peer's requests as unrequested on disconnect,
preventing premature end game
* i2psnark: Randomize selection of next piece during end game
* i2psnark: Don't restore a partial piece to a peer that is already working on it
* i2psnark: strip ".torrent" on web page
* i2psnark: Limit piece size in generated torrent to 1MB max
2006-09-09 zzz
* i2psnark: Add "Stalled" indication and stat totals on web page
2006-09-09 zzz
* i2psnark: Fix bug where new peers would always be sent an "interested"
regardless of actual interest
* i2psnark: Reduce max piece size from 10MB to 1MB; larger may have severe
memory and efficiency problems
* 2006-09-09 0.6.1.25 released
2006-09-08 jrandom
* Tweak the PRNG logging so it only displays error messages if there are
problems
* Disable dynamic router keys for the time being, as they don't offer
meaningful security, may hurt the router, and makes it harder to
determine the network health. The code to restart on SSU IP change is
still enabled however.
* Disable tunnel load testing, leaning back on the tiered selection for
the time being.
* Spattering of bugfixes
2006-09-07 zzz
* i2psnark: Increase output timeout from 2 min to 4 min
* i2psnark: Orphan debug msg cleanup
* i2psnark: More web rate report cleanup
2006-09-05 zzz
* i2psnark: Implement basic partial-piece saves across connections
* i2psnark: Implement keep-alive sending. This will keep non-i2psnark clients
from dropping us for inactivity but also renders the 2-minute transmit-inactivity
code in i2psnark ineffective. Will have to research why there is transmit but
not receive inactivity code. With the current connection limit of 24 peers
we aren't in any danger of keeping out new peers by keeping inactive ones.
* i2psnark: Increase CHECK_PERIOD from 20 to 40 since nothing happens in 20 seconds
* i2psnark: Fix dropped chunk handling
* i2psnark: Web rate report cleanup
2006-09-04 zzz
* i2psnark: Report cleared trackerErr immediately
* i2psnark: Add trackerErr reporting after previous success; retry more quickly
* i2psnark: Set up new connections more quickly
* i2psnark: Don't delay tracker fetch when setting up lots of connections
* i2psnark: Reduce MAX_UPLOADERS from 12 to 4
2006-09-04 zzz
* Enable pipelining in i2psnark
* Make i2psnark tunnel default be 1 + 0-1
2006-09-03 zzz
* Add rate reporting to i2psnark
2006-09-03 Complication
* Limit form size in SusiDNS to avoid exceeding a POST size limit on postback
* Print messages about addressbook size to give better overview
* Enable delete function in published addressbook
2006-08-21 Complication
* Fix error reporting discrepancy (thanks for helping notice, yojoe!)
2006-08-03 jrandom
* Decrease the recently modified tunnel building timeout, though keep
the scaling on their processing
2006-07-31 jrandom
* Increase the tunnel building timeout
* Avoid a rare race (thanks bar!)
* Fix the bandwidth capacity publishing code to factor in share percentage
and outbound throttling (oops)
2006-07-29 Complication
* Treat NTP responses from unexpected stratums like failures
* 2006-07-28 0.6.1.24 released

View File

@@ -1,5 +1,5 @@
<i2p.news date="$Date: 2006-07-18 15:08:00 $">
<i2p.release version="0.6.1.24" date="2006/07/29" minVersion="0.6"
<i2p.news date="$Date: 2006-07-29 13:03:16 $">
<i2p.release version="0.6.1.25" date="2006/07/29" minVersion="0.6"
anonurl="http://i2p/NF2RLVUxVulR3IqK0sGJR0dHQcGXAzwa6rEO4WAWYXOHw-DoZhKnlbf1nzHXwMEJoex5nFTyiNMqxJMWlY54cvU~UenZdkyQQeUSBZXyuSweflUXFqKN-y8xIoK2w9Ylq1k8IcrAFDsITyOzjUKoOPfVq34rKNDo7fYyis4kT5bAHy~2N1EVMs34pi2RFabATIOBk38Qhab57Umpa6yEoE~rbyR~suDRvD7gjBvBiIKFqhFueXsR2uSrPB-yzwAGofTXuklofK3DdKspciclTVzqbDjsk5UXfu2nTrC1agkhLyqlOfjhyqC~t1IXm-Vs2o7911k7KKLGjB4lmH508YJ7G9fLAUyjuB-wwwhejoWqvg7oWvqo4oIok8LG6ECR71C3dzCvIjY2QcrhoaazA9G4zcGMm6NKND-H4XY6tUWhpB~5GefB3YczOqMbHq4wi0O9MzBFrOJEOs3X4hwboKWANf7DT5PZKJZ5KorQPsYRSq0E3wSOsFCSsdVCKUGsAAAA/i2p/i2pupdate.sud"
publicurl="http://dev.i2p.net/i2p/i2pupdate.sud"
anonannouncement="http://i2p/NF2RLVUxVulR3IqK0sGJR0dHQcGXAzwa6rEO4WAWYXOHw-DoZhKnlbf1nzHXwMEJoex5nFTyiNMqxJMWlY54cvU~UenZdkyQQeUSBZXyuSweflUXFqKN-y8xIoK2w9Ylq1k8IcrAFDsITyOzjUKoOPfVq34rKNDo7fYyis4kT5bAHy~2N1EVMs34pi2RFabATIOBk38Qhab57Umpa6yEoE~rbyR~suDRvD7gjBvBiIKFqhFueXsR2uSrPB-yzwAGofTXuklofK3DdKspciclTVzqbDjsk5UXfu2nTrC1agkhLyqlOfjhyqC~t1IXm-Vs2o7911k7KKLGjB4lmH508YJ7G9fLAUyjuB-wwwhejoWqvg7oWvqo4oIok8LG6ECR71C3dzCvIjY2QcrhoaazA9G4zcGMm6NKND-H4XY6tUWhpB~5GefB3YczOqMbHq4wi0O9MzBFrOJEOs3X4hwboKWANf7DT5PZKJZ5KorQPsYRSq0E3wSOsFCSsdVCKUGsAAAA/pipermail/i2p/2005-September/000878.html"

View File

@@ -4,7 +4,7 @@
<info>
<appname>i2p</appname>
<appversion>0.6.1.24</appversion>
<appversion>0.6.1.26</appversion>
<authors>
<author name="I2P" email="support@i2p.net"/>
</authors>

View File

@@ -1,5 +1,5 @@
<i2p.news date="$Date: 2006-07-27 22:34:59 $">
<i2p.release version="0.6.1.24" date="2006/07/29" minVersion="0.6"
<i2p.news date="$Date: 2006-09-13 22:16:53 $">
<i2p.release version="0.6.1.25" date="2006/07/29" minVersion="0.6"
anonurl="http://i2p/NF2RLVUxVulR3IqK0sGJR0dHQcGXAzwa6rEO4WAWYXOHw-DoZhKnlbf1nzHXwMEJoex5nFTyiNMqxJMWlY54cvU~UenZdkyQQeUSBZXyuSweflUXFqKN-y8xIoK2w9Ylq1k8IcrAFDsITyOzjUKoOPfVq34rKNDo7fYyis4kT5bAHy~2N1EVMs34pi2RFabATIOBk38Qhab57Umpa6yEoE~rbyR~suDRvD7gjBvBiIKFqhFueXsR2uSrPB-yzwAGofTXuklofK3DdKspciclTVzqbDjsk5UXfu2nTrC1agkhLyqlOfjhyqC~t1IXm-Vs2o7911k7KKLGjB4lmH508YJ7G9fLAUyjuB-wwwhejoWqvg7oWvqo4oIok8LG6ECR71C3dzCvIjY2QcrhoaazA9G4zcGMm6NKND-H4XY6tUWhpB~5GefB3YczOqMbHq4wi0O9MzBFrOJEOs3X4hwboKWANf7DT5PZKJZ5KorQPsYRSq0E3wSOsFCSsdVCKUGsAAAA/i2p/i2pupdate.sud"
publicurl="http://dev.i2p.net/i2p/i2pupdate.sud"
anonannouncement="http://i2p/NF2RLVUxVulR3IqK0sGJR0dHQcGXAzwa6rEO4WAWYXOHw-DoZhKnlbf1nzHXwMEJoex5nFTyiNMqxJMWlY54cvU~UenZdkyQQeUSBZXyuSweflUXFqKN-y8xIoK2w9Ylq1k8IcrAFDsITyOzjUKoOPfVq34rKNDo7fYyis4kT5bAHy~2N1EVMs34pi2RFabATIOBk38Qhab57Umpa6yEoE~rbyR~suDRvD7gjBvBiIKFqhFueXsR2uSrPB-yzwAGofTXuklofK3DdKspciclTVzqbDjsk5UXfu2nTrC1agkhLyqlOfjhyqC~t1IXm-Vs2o7911k7KKLGjB4lmH508YJ7G9fLAUyjuB-wwwhejoWqvg7oWvqo4oIok8LG6ECR71C3dzCvIjY2QcrhoaazA9G4zcGMm6NKND-H4XY6tUWhpB~5GefB3YczOqMbHq4wi0O9MzBFrOJEOs3X4hwboKWANf7DT5PZKJZ5KorQPsYRSq0E3wSOsFCSsdVCKUGsAAAA/pipermail/i2p/2005-September/000878.html"
@@ -10,12 +10,15 @@
anonlogs="http://i2p/Nf3ab-ZFkmI-LyMt7GjgT-jfvZ3zKDl0L96pmGQXF1B82W2Bfjf0n7~288vafocjFLnQnVcmZd~-p0-Oolfo9aW2Rm-AhyqxnxyLlPBqGxsJBXjPhm1JBT4Ia8FB-VXt0BuY0fMKdAfWwN61-tj4zIcQWRxv3DFquwEf035K~Ra4SWOqiuJgTRJu7~o~DzHVljVgWIzwf8Z84cz0X33pv-mdG~~y0Bsc2qJVnYwjjR178YMcRSmNE0FVMcs6f17c6zqhMw-11qjKpY~EJfHYCx4lBWF37CD0obbWqTNUIbL~78vxqZRT3dgAgnLixog9nqTO-0Rh~NpVUZnoUi7fNR~awW5U3Cf7rU7nNEKKobLue78hjvRcWn7upHUF45QqTDuaM3yZa7OsjbcH-I909DOub2Q0Dno6vIwuA7yrysccN1sbnkwZbKlf4T6~iDdhaSLJd97QCyPOlbyUfYy9QLNExlRqKgNVJcMJRrIual~Lb1CLbnzt0uvobM57UpqSAAAA/meeting141"
publiclogs="http://www.i2p.net/meeting141" />
&#149;
2006-07-18: 0.6.1.22 <a href="http://dev.i2p/pipermail/i2p/2006-July/001300.html">released</a>
2006-09-09: 0.6.1.25 <a href="http://dev.i2p/pipermail/i2p/2006-September/001306.html">released</a>
with i2psnark, SusiDNS, NTP, and other fixes.
<br />
&#149;
2006-06-13:
<a href="http://dev.i2p/pipermail/i2p/2006-June/001293.html">status notes</a>
and
<a href="http://www.i2p/meeting182">meeting log</a>
2006-09-12:
<a href="http://www.i2p/meeting184">meeting log</a>
<br />
&#149;
2006-10-03:
<a href="http://dev.i2p/pipermail/i2p/2006-October/001309.html">status notes</a>
<br />
</i2p.news>

View File

@@ -33,6 +33,18 @@ public abstract class CommSystemFacade implements Service {
public int countActiveSendPeers() { return 0; }
public List getMostRecentErrorMessages() { return Collections.EMPTY_LIST; }
/**
* Median clock skew of connected peers in seconds, or null if we cannot answer.
* CommSystemFacadeImpl overrides this.
*/
public Long getMedianPeerClockSkew() { return null; }
/**
* Return framed average clock skew of connected peers in seconds, or null if we cannot answer.
* CommSystemFacadeImpl overrides this.
*/
public Long getFramedAveragePeerClockSkew(int percentToInclude) { return null; }
/**
* Determine under what conditions we are remotely reachable.
*

View File

@@ -78,7 +78,11 @@ public class LoadTestManager {
private static final boolean DEFAULT_ENABLE = false;
/** disable all load testing for the moment */
private static final boolean FORCE_DISABLE = true;
public static boolean isEnabled(I2PAppContext ctx) {
if (FORCE_DISABLE) return false;
String enable = ctx.getProperty("router.enableLoadTesting");
if ( (DEFAULT_ENABLE) && (enable != null) && (!Boolean.valueOf(enable).booleanValue()) )
return false;
@@ -130,6 +134,7 @@ public class LoadTestManager {
* Actually send the messages through the given tunnel
*/
private void runTest(LoadTestTunnelConfig tunnel) {
if (!isEnabled(_context)) return;
log(tunnel, "start");
int peerMessages = getPeerMessages();
if (_log.shouldLog(Log.DEBUG))
@@ -208,9 +213,17 @@ public class LoadTestManager {
// this should take into consideration both the inbound and outbound tunnels
// ... but it doesn't, yet.
_context.messageRegistry().registerPending(new Selector(tunnel, payloadMessage.getUniqueId()),
new SendAgain(_context, tunnel, payloadMessage.getUniqueId(), true),
new SendAgain(_context, tunnel, payloadMessage.getUniqueId(), false),
long uniqueId = -1;
if (payloadMessage != null) {
uniqueId = payloadMessage.getUniqueId();
} else {
tunnel.logComplete();
_active.remove(tunnel);
return;
}
_context.messageRegistry().registerPending(new Selector(tunnel, uniqueId),
new SendAgain(_context, tunnel, uniqueId, true),
new SendAgain(_context, tunnel, uniqueId, false),
10*1000);
_context.tunnelDispatcher().dispatchOutbound(payloadMessage, outbound.getSendTunnelId(0),
inbound.getReceiveTunnelId(0),

View File

@@ -240,8 +240,10 @@ public class Router {
readConfig();
setupHandlers();
if ("true".equalsIgnoreCase(_context.getProperty(Router.PROP_HIDDEN, "false")))
killKeys();
if (ALLOW_DYNAMIC_KEYS) {
if ("true".equalsIgnoreCase(_context.getProperty(Router.PROP_HIDDEN, "false")))
killKeys();
}
_context.messageValidator().startup();
_context.tunnelDispatcher().startup();
@@ -372,7 +374,8 @@ public class Router {
public void addCapabilities(RouterInfo ri) {
int bwLim = Math.min(_context.bandwidthLimiter().getInboundKBytesPerSecond(),
_context.bandwidthLimiter().getInboundKBytesPerSecond());
_context.bandwidthLimiter().getOutboundKBytesPerSecond());
bwLim = (int)(((float)bwLim) * getSharePercentage());
if (_log.shouldLog(Log.WARN))
_log.warn("Adding capabilities w/ bw limit @ " + bwLim, new Exception("caps"));
@@ -814,11 +817,19 @@ public class Router {
finalShutdown(exitCode);
}
/**
* disable dynamic key functionality for the moment, as it may be harmful and doesn't
* add meaningful anonymity
*/
private static final boolean ALLOW_DYNAMIC_KEYS = false;
public void finalShutdown(int exitCode) {
_log.log(Log.CRIT, "Shutdown(" + exitCode + ") complete", new Exception("Shutdown"));
try { _context.logManager().shutdown(); } catch (Throwable t) { }
if ("true".equalsIgnoreCase(_context.getProperty(PROP_DYNAMIC_KEYS, "false")))
killKeys();
if (ALLOW_DYNAMIC_KEYS) {
if ("true".equalsIgnoreCase(_context.getProperty(PROP_DYNAMIC_KEYS, "false")))
killKeys();
}
File f = new File(getPingFile());
f.delete();
@@ -1318,4 +1329,4 @@ class PersistRouterInfoJob extends JobImpl {
if (fos != null) try { fos.close(); } catch (IOException ioe) {}
}
}
}
}

View File

@@ -0,0 +1,104 @@
package net.i2p.router;
import net.i2p.util.Log;
import net.i2p.I2PAppContext;
import net.i2p.router.RouterContext;
import net.i2p.util.Clock;
/**
* Alternate location for determining the time which takes into account an offset.
* This offset will ideally be periodically updated so as to serve as the difference
* between the local computer's current time and the time as known by some reference
* (such as an NTP synchronized clock).
*
* RouterClock is a subclass of Clock with access to router transports.
* Configuration permitting, it will block clock offset changes
* which would increase peer clock skew.
*/
public class RouterClock extends Clock {
RouterContext _context;
public RouterClock(RouterContext context) {
super(context);
_context = context;
}
/**
* Specify how far away from the "correct" time the computer is - a positive
* value means that we are slow, while a negative value means we are fast.
*
*/
public void setOffset(long offsetMs, boolean force) {
if (false) return;
long delta = offsetMs - _offset;
if (!force) {
if ((offsetMs > MAX_OFFSET) || (offsetMs < 0 - MAX_OFFSET)) {
getLog().error("Maximum offset shift exceeded [" + offsetMs + "], NOT HONORING IT");
return;
}
// only allow substantial modifications before the first 10 minutes
if (_alreadyChanged && (System.currentTimeMillis() - _startedOn > 10 * 60 * 1000)) {
if ( (delta > MAX_LIVE_OFFSET) || (delta < 0 - MAX_LIVE_OFFSET) ) {
getLog().log(Log.CRIT, "The clock has already been updated, but you want to change it by "
+ delta + " to " + offsetMs + "? Did something break?");
return;
}
}
if ((delta < MIN_OFFSET_CHANGE) && (delta > 0 - MIN_OFFSET_CHANGE)) {
getLog().debug("Not changing offset since it is only " + delta + "ms");
_alreadyChanged = true;
return;
}
// If so configured, check sanity of proposed clock offset
if (Boolean.valueOf(_context.getProperty("router.clockOffsetSanityCheck","true")).booleanValue() == true) {
// Try calculating peer clock skew
Long peerClockSkew = _context.commSystem().getFramedAveragePeerClockSkew(50);
if (peerClockSkew != null) {
// Predict the effect of applying the proposed clock offset
long currentPeerClockSkew = peerClockSkew.longValue();
long predictedPeerClockSkew = currentPeerClockSkew + (delta / 1000l);
// Fail sanity check if applying the offset would increase peer clock skew
if ((Math.abs(predictedPeerClockSkew) > (Math.abs(currentPeerClockSkew) + 5)) ||
(Math.abs(predictedPeerClockSkew) > 20)) {
getLog().error("Ignoring clock offset " + offsetMs + "ms (current " + _offset +
"ms) since it would increase peer clock skew from " + currentPeerClockSkew +
"s to " + predictedPeerClockSkew + "s. Broken server in pool.ntp.org?");
return;
} else {
getLog().debug("Approving clock offset " + offsetMs + "ms (current " + _offset +
"ms) since it would decrease peer clock skew from " + currentPeerClockSkew +
"s to " + predictedPeerClockSkew + "s.");
}
}
} // check sanity
}
if (_alreadyChanged) {
if (delta > 15*1000)
getLog().log(Log.CRIT, "Updating clock offset to " + offsetMs + "ms from " + _offset + "ms");
else if (getLog().shouldLog(Log.INFO))
getLog().info("Updating clock offset to " + offsetMs + "ms from " + _offset + "ms");
if (!_statCreated)
_context.statManager().createRateStat("clock.skew", "How far is the already adjusted clock being skewed?", "Clock", new long[] { 10*60*1000, 3*60*60*1000, 24*60*60*60 });
_statCreated = true;
_context.statManager().addRateData("clock.skew", delta, 0);
} else {
getLog().log(Log.INFO, "Initializing clock offset to " + offsetMs + "ms from " + _offset + "ms");
}
_alreadyChanged = true;
_offset = offsetMs;
fireOffsetChanged(delta);
}
}
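
To make the acceptance rule concrete: with router.clockOffsetSanityCheck left at its default of true, a proposed offset is ignored if it would push the framed-average peer skew more than 5 seconds further from zero than it already is, or beyond 20 seconds outright. A toy run of the same arithmetic with invented numbers:

public class SkewCheckDemo {
    public static void main(String[] args) {
        long currentPeerClockSkew = 3;        // seconds, as getFramedAveragePeerClockSkew(50) might report
        long currentOffsetMs = 0;
        long proposedOffsetMs = 12000;        // an NTP result suggesting a 12 s jump
        long delta = proposedOffsetMs - currentOffsetMs;
        long predicted = currentPeerClockSkew + delta / 1000;                        // 3 + 12 = 15 s
        boolean reject = (Math.abs(predicted) > Math.abs(currentPeerClockSkew) + 5)  // 15 > 8
                      || (Math.abs(predicted) > 20);
        System.out.println("predicted peer skew " + predicted + " s, rejected=" + reject);  // rejected=true
    }
}
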

View File

@@ -26,6 +26,8 @@ import net.i2p.router.transport.OutboundMessageRegistry;
import net.i2p.router.transport.VMCommSystem;
import net.i2p.router.tunnel.pool.TunnelPoolManager;
import net.i2p.router.tunnel.TunnelDispatcher;
import net.i2p.util.Clock;
import net.i2p.router.RouterClock;
/**
* Build off the core I2P context to provide a root for a router instance to
@@ -59,13 +61,15 @@ public class RouterContext extends I2PAppContext {
private MessageValidator _messageValidator;
private MessageStateMonitor _messageStateMonitor;
private RouterThrottle _throttle;
private RouterClock _clock;
private Calculator _isFailingCalc;
private Calculator _integrationCalc;
private Calculator _speedCalc;
private Calculator _reliabilityCalc;
private Calculator _capacityCalc;
private Calculator _oldSpeedCalc;
private static List _contexts = new ArrayList(1);
public RouterContext(Router router) { this(router, null); }
@@ -323,4 +327,25 @@ public class RouterContext extends I2PAppContext {
}
return super.getProperty(propName, defaultVal);
}
/**
* The context's synchronized clock, which is kept context specific only to
* enable simulators to play with clock skew among different instances.
*
* It wouldn't be necessary to override clock(), except for the reason
* that it triggers initializeClock() of which we definitely
* need the local version to run.
*/
public Clock clock() {
if (!_clockInitialized) initializeClock();
return _clock;
}
protected void initializeClock() {
synchronized (this) {
if (_clock == null)
_clock = new RouterClock(this);
_clockInitialized = true;
}
}
}

View File

@@ -15,8 +15,8 @@ import net.i2p.CoreVersion;
*
*/
public class RouterVersion {
public final static String ID = "$Revision: 1.441 $ $Date: 2006-07-28 23:41:16 $";
public final static String VERSION = "0.6.1.24";
public final static String ID = "$Revision: 1.466 $ $Date: 2006-10-08 17:53:00 $";
public final static String VERSION = "0.6.1.26";
public final static long BUILD = 0;
public static void main(String args[]) {
System.out.println("I2P Router version: " + VERSION + "-" + BUILD);

View File

@@ -44,7 +44,7 @@ class FloodOnlySearchJob extends FloodSearchJob {
_isLease = isLease;
_lookupsRemaining = 0;
_dead = false;
_out = new ArrayList(2);
_out = Collections.synchronizedList(new ArrayList(2));
_replySelector = new FloodOnlyLookupSelector(getContext(), this);
_onReply = new FloodOnlyLookupMatchJob(getContext(), this);
_onTimeout = new FloodOnlyLookupTimeoutJob(getContext(), this);
@@ -70,7 +70,7 @@ class FloodOnlySearchJob extends FloodSearchJob {
return;
}
OutNetMessage out = getContext().messageRegistry().registerPending(_replySelector, _onReply, _onTimeout, _timeoutMs);
_out.add(out);
synchronized (_out) { _out.add(out); }
for (int i = 0; _lookupsRemaining < CONCURRENT_SEARCHES && i < floodfillPeers.size(); i++) {
Hash peer = (Hash)floodfillPeers.get(i);
@@ -113,8 +113,10 @@ class FloodOnlySearchJob extends FloodSearchJob {
if (_dead) return;
_dead = true;
}
for (int i = 0; i < _out.size(); i++) {
OutNetMessage out = (OutNetMessage)_out.get(i);
List outBuf = null;
synchronized (_out) { outBuf = new ArrayList(_out); }
for (int i = 0; i < outBuf.size(); i++) {
OutNetMessage out = (OutNetMessage)outBuf.get(i);
getContext().messageRegistry().unregisterPending(out);
}
int timeRemaining = (int)(_origExpiration - getContext().clock().now());
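
The idiom here is worth spelling out: Collections.synchronizedList() only makes individual calls such as add() atomic; iterating (or bulk-copying) the list still requires holding the list's own monitor, which is why the failure path snapshots _out under synchronized and then walks the private copy. A generic sketch of the same pattern, with invented names:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class SnapshotIterate {
    private final List _out = Collections.synchronizedList(new ArrayList(2));

    public void add(Object msg) { _out.add(msg); }       // single call: already thread-safe

    public void cancelAll() {
        List snapshot;
        synchronized (_out) {                            // iteration/bulk copy needs the list's own lock
            snapshot = new ArrayList(_out);
        }
        for (int i = 0; i < snapshot.size(); i++)
            System.out.println("unregister " + snapshot.get(i));
    }
}
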

View File

@@ -16,6 +16,8 @@ import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.Vector;
import java.util.Collections;
import net.i2p.data.RouterAddress;
import net.i2p.router.CommSystemFacade;
@@ -57,6 +59,77 @@ public class CommSystemFacadeImpl extends CommSystemFacade {
public int countActivePeers() { return (_manager == null ? 0 : _manager.countActivePeers()); }
public int countActiveSendPeers() { return (_manager == null ? 0 : _manager.countActiveSendPeers()); }
/**
* Median clock skew of connected peers in seconds, or null if we cannot answer.
*/
public Long getMedianPeerClockSkew() {
if (_manager == null) {
if (_log.shouldLog(Log.INFO))
_log.info("Returning null for median peer clock skew (no transport manager)!");
return null;
}
Vector skews = _manager.getClockSkews();
if (skews == null) {
if (_log.shouldLog(Log.ERROR))
_log.error("Returning null for median peer clock skew (no data)!");
return null;
}
if (skews.size() < 5) {
if (_log.shouldLog(Log.ERROR))
_log.error("Returning null for median peer clock skew (only " + skews.size() + " peers)!");
return null;
}
// Going to calculate, let's sort them
Collections.sort(skews);
// Pick out median
Long medianPeerClockSkew = (Long) (skews.get(skews.size() / 2));
if (_log.shouldLog(Log.INFO))
_log.info("Our median peer clock skew is " + medianPeerClockSkew + " s.");
return medianPeerClockSkew;
}
/**
* Framed average clock skew of connected peers in seconds, or null if we cannot answer.
* Average is calculated over the middle "percentToInclude" peers.
*/
public Long getFramedAveragePeerClockSkew(int percentToInclude) {
if (_manager == null) {
if (_log.shouldLog(Log.INFO))
_log.info("Returning null for framed averege peer clock skew (no transport manager)!");
return null;
}
Vector skews = _manager.getClockSkews();
if (skews == null) {
if (_log.shouldLog(Log.ERROR))
_log.error("Returning null for framed average peer clock skew (no data)!");
return null;
}
if (skews.size() < 5) {
if (_log.shouldLog(Log.ERROR))
_log.error("Returning null for framed average peer clock skew (only " + skews.size() + " peers)!");
return null;
}
// Going to calculate, sort them
Collections.sort(skews);
// Calculate frame size
int frameSize = (skews.size() * percentToInclude / 100);
int first = (skews.size() / 2) - (frameSize / 2);
int last = (skews.size() / 2) + (frameSize / 2);
// Sum skew values
long sum = 0;
for (int i = first; i < last; i++) {
long value = ((Long) (skews.get(i))).longValue();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Adding clock skew " + i + " valued " + value + " s.");
sum = sum + value;
}
// Calculate average
Long framedAverageClockSkew = new Long(sum / frameSize);
if (_log.shouldLog(Log.INFO))
_log.info("Our framed average peer clock skew is " + framedAverageClockSkew + " s.");
return framedAverageClockSkew;
}
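
Concretely, RouterClock calls this with percentToInclude = 50, so for ten reported skews the code sorts them, computes frameSize = 5, and sums the entries at indices 3 through 6 around the median before dividing by frameSize. A toy run with invented skew values:

import java.util.Collections;
import java.util.Vector;

public class FramedAverageDemo {
    public static void main(String[] args) {
        long[] sample = { -9, -4, -2, -1, 0, 1, 1, 3, 6, 14 };    // invented skews, in seconds
        Vector skews = new Vector();
        for (int i = 0; i < sample.length; i++)
            skews.addElement(new Long(sample[i]));
        Collections.sort(skews);
        int percentToInclude = 50;                                 // what RouterClock passes
        int frameSize = skews.size() * percentToInclude / 100;     // 10 * 50 / 100 = 5
        int first = (skews.size() / 2) - (frameSize / 2);          // 5 - 2 = 3
        int last  = (skews.size() / 2) + (frameSize / 2);          // 5 + 2 = 7
        long sum = 0;
        for (int i = first; i < last; i++)                         // indices 3..6: -1, 0, 1, 1
            sum += ((Long) skews.get(i)).longValue();
        System.out.println("framed average = " + (sum / frameSize) + " s");   // 1 / 5 = 0 s
    }
}

The outliers at both ends fall outside the frame, so a single wildly-skewed peer no longer drags the average.
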
public List getBids(OutNetMessage msg) {
return _manager.getBids(msg);

View File

@@ -171,6 +171,7 @@ public class OutboundMessageRegistry {
}
public void unregisterPending(OutNetMessage msg) {
if (msg == null) return;
MessageSelector sel = msg.getReplySelector();
boolean stillActive = false;
synchronized (_selectorToMessage) {

View File

@@ -12,6 +12,7 @@ import java.io.IOException;
import java.io.Writer;
import java.util.List;
import java.util.Set;
import java.util.Vector;
import net.i2p.data.Hash;
import net.i2p.data.RouterAddress;
@@ -40,6 +41,7 @@ public interface Transport {
public int countActivePeers();
public int countActiveSendPeers();
public Vector getClockSkews();
public List getMostRecentErrorMessages();
public void renderStatusHTML(Writer out, String urlBase, int sortFlags) throws IOException;

View File

@@ -67,6 +67,13 @@ public abstract class TransportImpl implements Transport {
*/
public int countActiveSendPeers() { return 0; }
/**
* Return our peer clock skews on a transport.
* Vector composed of Long, each element representing a peer skew in seconds.
* Dummy version. Transports override it.
*/
public Vector getClockSkews() { return new Vector(); }
public List getMostRecentErrorMessages() { return Collections.EMPTY_LIST; }
/**
* Nonblocking call to pull the next outbound message

View File

@@ -18,6 +18,7 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.Vector;
import net.i2p.data.Hash;
import net.i2p.data.RouterAddress;
import net.i2p.data.RouterIdentity;
@@ -150,6 +151,23 @@ public class TransportManager implements TransportEventListener {
return peers;
}
/**
* Return our peer clock skews on all transports.
* Vector composed of Long, each element representing a peer skew in seconds.
* Note: this method returns them in whimsical order.
*/
public Vector getClockSkews() {
Vector skews = new Vector();
for (int i = 0; i < _transports.size(); i++) {
Vector tempSkews = ((Transport)_transports.get(i)).getClockSkews();
if ((tempSkews == null) || (tempSkews.size() <= 0)) continue;
skews.addAll(tempSkews);
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("Transport manager returning " + skews.size() + " peer clock skews.");
return skews;
}
public short getReachabilityStatus() {
if (_transports.size() <= 0) return CommSystemFacade.STATUS_UNKNOWN;
short status[] = new short[_transports.size()];

View File

@@ -192,7 +192,7 @@ public class EstablishState {
System.arraycopy(_X, 0, xy, 0, _X.length);
System.arraycopy(_Y, 0, xy, _X.length, _Y.length);
Hash hxy = _context.sha().calculateHash(xy);
_tsB = _context.clock().now()/1000l;
_tsB = _context.clock().now()/1000l; // our (Bob's) timestamp in seconds
byte padding[] = new byte[12]; // the encrypted data needs an extra 12 bytes
_context.random().nextBytes(padding);
byte toEncrypt[] = new byte[hxy.getData().length+4+padding.length];
@@ -318,6 +318,8 @@ public class EstablishState {
}
}
}
if (_e_hXY_tsB == null) return; // !src.hasRemaining
while (_received < _Y.length + _e_hXY_tsB.length && src.hasRemaining()) {
int i = _received-_Y.length;
_received++;
@@ -339,8 +341,8 @@ public class EstablishState {
fail("Invalid H(X+Y) - mitm attack attempted?");
return;
}
_tsB = DataHelper.fromLong(hXY_tsB, Hash.HASH_LENGTH, 4);
_tsA = _context.clock().now()/1000;
_tsB = DataHelper.fromLong(hXY_tsB, Hash.HASH_LENGTH, 4); // their (Bob's) timestamp in seconds
_tsA = _context.clock().now()/1000; // our (Alice's) timestamp in seconds
if (_log.shouldLog(Log.DEBUG))
_log.debug(prefix()+"h(X+Y) is correct, tsA-tsB=" + (_tsA-_tsB));
@@ -350,11 +352,11 @@ public class EstablishState {
if (diff >= Router.CLOCK_FUDGE_FACTOR) {
_context.statManager().addRateData("ntcp.invalidOutboundSkew", diff, 0);
_transport.markReachable(_con.getRemotePeer().calculateHash());
_context.shitlist().shitlistRouter(_con.getRemotePeer().calculateHash(), "Outbound clock skew of " + diff);
fail("Clocks too skewed (" + diff + ")", null, true);
_context.shitlist().shitlistRouter(_con.getRemotePeer().calculateHash(), "Outbound clock skew of " + diff + " ms");
fail("Clocks too skewed (" + diff + " ms)", null, true);
return;
} else if (_log.shouldLog(Log.DEBUG)) {
_log.debug(prefix()+"Clock skew: " + diff);
_log.debug(prefix()+"Clock skew: " + diff + " ms");
}
// now prepare and send our response
@@ -453,7 +455,7 @@ public class EstablishState {
System.arraycopy(_prevEncrypted, _prevEncrypted.length-16, nextWriteIV, 0, 16);
byte nextReadIV[] = new byte[16];
System.arraycopy(_e_bobSig, _e_bobSig.length-16, nextReadIV, 0, nextReadIV.length);
_con.finishOutboundEstablishment(_dh.getSessionKey(), 1000*(_tsA-_tsB), nextWriteIV, nextReadIV);
_con.finishOutboundEstablishment(_dh.getSessionKey(), (_tsA-_tsB), nextWriteIV, nextReadIV); // skew in seconds
return;
}
}
@@ -535,11 +537,11 @@ public class EstablishState {
if (diff >= Router.CLOCK_FUDGE_FACTOR) {
_context.statManager().addRateData("ntcp.invalidInboundSkew", diff, 0);
_transport.markReachable(alice.calculateHash());
_context.shitlist().shitlistRouter(alice.calculateHash(), "Clock skew of " + diff);
fail("Clocks too skewed (" + diff + ")", null, true);
_context.shitlist().shitlistRouter(alice.calculateHash(), "Clock skew of " + diff + " ms");
fail("Clocks too skewed (" + diff + " ms)", null, true);
return;
} else if (_log.shouldLog(Log.DEBUG)) {
_log.debug(prefix()+"Clock skew: " + diff);
_log.debug(prefix()+"Clock skew: " + diff + " ms");
}
sendInboundConfirm(alice, tsA);
@@ -548,7 +550,7 @@ public class EstablishState {
_log.debug(prefix()+"e_bobSig is " + _e_bobSig.length + " bytes long");
byte iv[] = new byte[16];
System.arraycopy(_e_bobSig, _e_bobSig.length-16, iv, 0, 16);
_con.finishInboundEstablishment(_dh.getSessionKey(), 1000*(tsA-_tsB), iv, _prevEncrypted);
_con.finishInboundEstablishment(_dh.getSessionKey(), (tsA-_tsB), iv, _prevEncrypted); // skew in seconds
if (_log.shouldLog(Log.INFO))
_log.info(prefix()+"Verified remote peer as " + alice.calculateHash().toBase64());
} else {
@@ -668,7 +670,7 @@ public class EstablishState {
long skewSeconds = (ctx.clock().now()/1000)-now;
if (log.shouldLog(Log.INFO))
log.info("Check info received: our IP: " + ourIP + " our port: " + port
+ " skew: " + skewSeconds);
+ " skew: " + skewSeconds + " s");
} catch (UnknownHostException uhe) {
// ipSize is invalid
if (log.shouldLog(Log.WARN))
@@ -883,4 +885,4 @@ public class EstablishState {
e.printStackTrace();
}
}
}
}

View File

@@ -67,7 +67,7 @@ public class NTCPConnection implements FIFOBandwidthLimiter.CompleteListener {
private boolean _closed;
private NTCPAddress _remAddr;
private RouterIdentity _remotePeer;
private long _clockSkew;
private long _clockSkew; // in seconds
/**
* pending unprepared OutNetMessage instances
*/
@@ -168,7 +168,7 @@ public class NTCPConnection implements FIFOBandwidthLimiter.CompleteListener {
public RouterIdentity getRemotePeer() { return _remotePeer; }
public void setRemotePeer(RouterIdentity ident) { _remotePeer = ident; }
/**
* @param clockSkew alice's clock minus bob's clock (may be negative, obviously, but |val| should
* @param clockSkew alice's clock minus bob's clock in seconds (may be negative, obviously, but |val| should
* be under 1 minute)
*/
public void finishInboundEstablishment(SessionKey key, long clockSkew, byte prevWriteEnd[], byte prevReadEnd[]) {
@@ -360,7 +360,7 @@ public class NTCPConnection implements FIFOBandwidthLimiter.CompleteListener {
}
/**
* @param clockSkew alice's clock minus bob's clock (may be negative, obviously, but |val| should
* @param clockSkew alice's clock minus bob's clock in seconds (may be negative, obviously, but |val| should
* be under 1 minute)
*/
public void finishOutboundEstablishment(SessionKey key, long clockSkew, byte prevWriteEnd[], byte prevReadEnd[]) {
@@ -995,16 +995,16 @@ public class NTCPConnection implements FIFOBandwidthLimiter.CompleteListener {
return;
} else {
long newSkew = (ourTs - ts);
if ( (newSkew > Router.CLOCK_FUDGE_FACTOR) || (newSkew < 0-Router.CLOCK_FUDGE_FACTOR) ) {
if (Math.abs(newSkew*1000) > Router.CLOCK_FUDGE_FACTOR) {
if (_log.shouldLog(Log.WARN))
_log.warn("Peer's skew jumped too far (from " + _clockSkew + " to " + newSkew + "): " + toString());
_log.warn("Peer's skew jumped too far (from " + _clockSkew + " s to " + newSkew + " s): " + toString());
_context.statManager().addRateData("ntcp.corruptSkew", newSkew, getUptime());
close();
return;
}
_context.statManager().addRateData("ntcp.receiveMeta", newSkew, getUptime());
if (_log.shouldLog(Log.DEBUG))
_log.debug("Received NTCP metadata, old skew of " + _clockSkew + ", new skew of " + newSkew);
_log.debug("Received NTCP metadata, old skew of " + _clockSkew + " s, new skew of " + newSkew + "s.");
_clockSkew = newSkew;
}
}

View File

@@ -15,6 +15,7 @@ import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.TreeSet;
import java.util.Vector;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.data.RouterAddress;
@@ -344,6 +345,28 @@ public class NTCPTransport extends TransportImpl {
return active;
}
/**
* Return our peer clock skews on this transport.
* Vector composed of Long, each element representing a peer skew in seconds.
*/
public Vector getClockSkews() {
Vector peers = new Vector();
Vector skews = new Vector();
synchronized (_conLock) {
peers.addAll(_conByIdent.values());
}
for (Iterator iter = peers.iterator(); iter.hasNext(); ) {
NTCPConnection con = (NTCPConnection)iter.next();
skews.addElement(new Long (con.getClockSkew()));
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("NTCP transport returning " + skews.size() + " peer clock skews.");
return skews;
}
private static final int NUM_CONCURRENT_READERS = 3;
private static final int NUM_CONCURRENT_WRITERS = 3;
@@ -565,8 +588,9 @@ public class NTCPTransport extends TransportImpl {
buf.append("</td><td>For ").append(DataHelper.formatDuration(readTime));
readingPeers++;
}
buf.append("</td><td>").append(DataHelper.formatDuration(con.getClockSkew()));
buf.append("</td></tr>\n");
offsetTotal = offsetTotal + con.getClockSkew();
buf.append("</td><td>").append(con.getClockSkew());
buf.append("s</td></tr>\n");
out.write(buf.toString());
buf.setLength(0);
}
@@ -576,8 +600,9 @@ public class NTCPTransport extends TransportImpl {
buf.append("<tr><td>").append(peers.size()).append(" peers</td><td>&nbsp;</td><td>").append(DataHelper.formatDuration(totalUptime/peers.size()));
buf.append("</td><td>&nbsp;</td><td>").append(totalSend).append("</td><td>").append(totalRecv);
buf.append("</td><td>").append(formatRate(bpsSend/1024)).append("/").append(formatRate(bpsRecv/1024)).append("KBps");
buf.append("</td><td>&nbsp;</td><td>&nbsp;</td><td>&nbsp;</td><td>&nbsp;</td>");
buf.append("</tr>\n");
buf.append("</td><td>&nbsp;</td><td>&nbsp;</td><td>&nbsp;");
buf.append("</td><td>").append(peers.size() > 0 ? DataHelper.formatDuration(offsetTotal*1000/peers.size()) : "0ms");
buf.append("</td></tr>\n");
}
buf.append("</table>\n");

View File

@@ -332,7 +332,7 @@ public class PeerState {
/** when were the current cipher and MAC keys established/rekeyed? */
public long getKeyEstablishedTime() { return _keyEstablishedTime; }
/** how far off is the remote peer from our clock, in seconds? */
public short getClockSkew() { return _clockSkew; }
public short getClockSkew() { return ( (short) (_clockSkew / 1000)); }
/** what is the current receive second, for congestion control? */
public long getCurrentReceiveSecond() { return _currentReceiveSecond; }
/** when did we last send them a packet? */

View File

@@ -1207,6 +1207,30 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
return active;
}
/**
* Return our peer clock skews on this transport.
* Vector composed of Long, each element representing a peer skew in seconds.
*/
public Vector getClockSkews() {
Vector skews = new Vector();
Vector peers = new Vector();
synchronized (_peersByIdent) {
peers.addAll(_peersByIdent.values());
}
long now = _context.clock().now();
for (Iterator iter = peers.iterator(); iter.hasNext(); ) {
PeerState peer = (PeerState)iter.next();
if (now-peer.getLastReceiveTime() > 60*60*1000) continue; // skip old peers
skews.addElement(new Long (peer.getClockSkew()));
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("UDP transport returning " + skews.size() + " peer clock skews.");
return skews;
}
private static UDPTransport __instance;
/** **internal, do not use** */
public static final UDPTransport _instance() { return __instance; }
@@ -1686,7 +1710,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
buf.append("</code></td>");
buf.append("<td valign=\"top\" ><code>");
buf.append(peer.getClockSkew()/1000);
buf.append(peer.getClockSkew());
buf.append("s</code></td>");
offsetTotal = offsetTotal + peer.getClockSkew();
@@ -1783,7 +1807,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
buf.append(formatKBps(bpsIn)).append("KBps/").append(formatKBps(bpsOut));
buf.append("KBps</td>");
buf.append(" <td>").append(numPeers > 0 ? DataHelper.formatDuration(uptimeMsTotal/numPeers) : "0s");
buf.append("</td><td>").append(numPeers > 0 ? DataHelper.formatDuration(offsetTotal/numPeers) : "0ms").append("</td>\n");
buf.append("</td><td>").append(numPeers > 0 ? DataHelper.formatDuration(offsetTotal*1000/numPeers) : "0ms").append("</td>\n");
buf.append(" <td>");
buf.append(numPeers > 0 ? cwinTotal/(numPeers*1024) + "K" : "0K");
buf.append("</td><td>&nbsp;</td>\n");

View File

@@ -296,7 +296,13 @@ public class BatchedPreprocessor extends TrivialPreprocessor {
return;
}
preprocess(preprocessed, offset);
try {
preprocess(preprocessed, offset);
} catch (ArrayIndexOutOfBoundsException aioobe) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error preprocessing the messages (offset=" + offset + " start=" + startAt + " through=" + sendThrough + " pending=" + pending.size() + " preproc=" + preprocessed.length);
return;
}
long msgId = sender.sendPreprocessed(preprocessed, rec);
for (int i = 0; i < pending.size(); i++) {

View File

@@ -88,7 +88,7 @@ class BuildHandler {
handled.add(_inboundBuildMessages.remove(_inboundBuildMessages.size()-1));
} else {
// drop any expired messages
long dropBefore = System.currentTimeMillis() - (BuildRequestor.REQUEST_TIMEOUT/2);
long dropBefore = System.currentTimeMillis() - (BuildRequestor.REQUEST_TIMEOUT/4);
do {
BuildMessageState state = (BuildMessageState)_inboundBuildMessages.get(0);
if (state.recvTime <= dropBefore) {
@@ -640,7 +640,7 @@ class BuildHandler {
for (int i = 0; i < _inboundBuildMessages.size(); i++) {
BuildMessageState cur = (BuildMessageState)_inboundBuildMessages.get(i);
long age = System.currentTimeMillis() - cur.recvTime;
if (age >= BuildRequestor.REQUEST_TIMEOUT/2) {
if (age >= BuildRequestor.REQUEST_TIMEOUT/4) {
_inboundBuildMessages.remove(i);
i--;
dropped++;