propagate from branch 'i2p.www' (head 22904ca45ea1c7c298f3f7de9627dacc6f77d013)

to branch 'i2p.www.str4d' (head 7e3d922345edd815f99d9e3b28d58b9eca48b162)
This commit is contained in:
str4d
2016-04-10 22:03:04 +00:00
34 changed files with 2338 additions and 27 deletions

create-proposal.sh Executable file

@@ -0,0 +1,48 @@
#!/bin/sh
PROPOSAL_DIR="i2p2www/spec/proposals"
if [ $# -lt 4 ]
then
echo "Usage: ./create-proposal.sh name-in-url \"Title of proposal\" author forum-url [file]"
exit 1
fi
name=$1
title=$2
author=$3
thread=$4
file=$5
date=`date +%Y-%m-%d`
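# next proposal number: 3-digit prefix of the newest existing proposal file, plus one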
num=`expr $(expr substr $(ls -r "$PROPOSAL_DIR" | head -n1) 1 3) + 1`
titleline=`printf '%*s' "$(expr length "$title")" | tr ' ' =`
proposal="$PROPOSAL_DIR/$num-$name.rst"
cat >"$proposal" <<EOF
$titleline
$title
$titleline
.. meta::
:author: $author
:created: $date
:thread: $thread
:lastupdated: $date
:status: Draft
.. contents::
Introduction
============
EOF
if [ -f "$file" ]
then
cat "$file" >>"$proposal"
else
echo >>"$proposal"
fi
echo "Proposal created: $proposal"


@@ -110,6 +110,7 @@ GETTEXT_DOMAIN_MAPPING = {
TEMPLATE_DIR = os.path.join(os.path.dirname(__file__), 'pages')
STATIC_DIR = os.path.join(os.path.dirname(__file__), 'static')
SPEC_DIR = os.path.join(os.path.dirname(__file__), 'spec')
PROPOSAL_DIR = os.path.join(SPEC_DIR, 'proposals')
BLOG_DIR = os.path.join(os.path.dirname(__file__), 'blog')
MEETINGS_DIR = os.path.join(os.path.dirname(__file__), 'meetings/logs')
SITE_DIR = os.path.join(TEMPLATE_DIR, 'site')


@@ -1,6 +1,7 @@
{%- macro change_lang(lang) -%}
{%- if request.endpoint == 'site_show' -%}{{ url_for('site_show', lang=lang, page=page) }}
{%- elif request.endpoint == 'spec_show' -%}{{ url_for('spec_show', name=name) }}
{%- elif request.endpoint == 'proposal_show' -%}{{ url_for('proposal_show', name=name) }}
{%- elif request.endpoint == 'blog_index' -%}
{%- if category -%}{{ url_for('blog_index', lang=lang, category=category) }}
{%- else -%}{{ url_for('blog_index', lang=lang) }}
@@ -41,6 +42,12 @@
</div>
{%- endmacro -%}
{%- macro render_supercedes(supercedes) -%}
{%- if supercedes and supercedes|length -%}
{% for proposal in supercedes %}<a href="{{ proposal_url(proposal) }}">{{ proposal }}</a>{% if not loop.last %}, {% endif %}{% endfor %}
{%- endif %}
{%- endmacro %}
{%- macro render_categories(categories) -%}
{%- if categories and categories|length -%}
{{ _('Posted in') }} {% for category in categories %}<a href="{{ get_url('blog_index', category=category) }}">{{ category }}</a>{% if not loop.last %}, {% endif %}{% endfor %}


@@ -30,6 +30,7 @@
</ul>
</li>
<li><a href="{{ url_for('spec_index') }}"><div class="menuitem"><span>{{ _('Specifications') }}</span></div></a></li>
<li><a href="{{ url_for('proposal_index') }}"><div class="menuitem"><span>{{ _('Proposals') }}</span></div></a></li>
<li class="has-sub"><div class="menuitem"><span>{{ _('API') }}</span></div>
<ul>
<li><a href="{{ site_url('docs/api/i2ptunnel') }}"><div class="menuitem"><span>I2PTunnel</span></div></a></li>


@@ -1,9 +1,13 @@
{% extends "global/layout.html" %}
{% block title %}I2P Specification Documents{% endblock %}
{% block content %}
<p>
This page provides the specifications for various components of the I2P network
and router software. These are living documents, and the specifications are
updated as modifications are made to the network and software. The proposal
documents that track changes to these specifications can be viewed
<a href="{{ url_for('proposal_index') }}">here</a>.
</p>
<ul><li>
"Last updated" is the last date when the specification given within a document


@@ -0,0 +1,36 @@
{% extends "global/layout.html" %}
{% block title %}I2P Proposal Documents{% endblock %}
{% block content %}
<p>
This page is the central index of proposed changes to the
<a href="{{ url_for('spec_index') }}">I2P specifications</a>.
</p>
<p>{% trans dev='http://'+i2pconv('zzz.i2p')+'/topics/new?forum_id=7-big-topics-ideas-proposals-and-discussion',
trac='http://'+i2pconv('trac.i2p2.i2p')+'/newticket?summary=New%20proposal:%20&type=enhancement&milestone=n/a&component=www/i2p&keywords=review-needed' -%}
To submit a proposal, post it on the <a href="{{ dev }}">development forum</a>
or <a href="{{ trac }}">enter a ticket with the proposal attached</a>.
{%- endtrans %}</p>
<table>
<tr>
<th>Number</th>
<th>Title</th>
<th>Last updated</th>
<th>Status</th>
<th>Link</th>
</tr>
{% for proposal in proposals %}
<tr>
<td>{{ proposal.num }}</td>
<td>{{ proposal.title }}</td>
<td><time>{{ proposal.lastupdated }}</time></td>
<td>{{ proposal.status }}</td>
<td>
<a href="{{ url_for('proposal_show', name=proposal.name) }}">HTML</a> |
<a href="{{ url_for('proposal_show_txt', name=proposal.name) }}">TXT</a>
</td>
</tr>
{% endfor %}
</table>
{% endblock %}


@@ -0,0 +1,34 @@
{% extends "global/layout.html" %}
{%- from "global/macros" import render_supercedes with context -%}
{% block title %}{{ title }}{% endblock %}
{% block content_nav %}
{% autoescape false %}
{{ toc }}
{% endautoescape %}
{% endblock %}
{% block content %}
<dl class="meta">
<dt>Number</dt>
<dd>{{ meta.num }}</dd>
<dt>Author</dt>
<dd>{{ meta.author }}</dd>
<dt>Created</dt>
<dd><time datetime="{{ meta.created }}">{{ meta.created }}</time></dd>
<dt>Thread</dt>
<dd><a href="{{ meta.thread }}">{{ meta.thread }}</a></dd>
<dt>Last updated</dt>
<dd><time datetime="{{ meta.lastupdated }}">{{ meta.lastupdated }}</time></dd>
<dt>Status</dt><dd>{{ meta.status }}</dd>
{% if meta.supercededby -%}
<dt>Superceded by</dt>
<dd><a href="{{ proposal_url(meta.supercededby) }}">{{ meta.supercededby }}</a></dd>
{%- endif %}
{% if meta.supercedes -%}
<dt>Supercedes</dt>
<dd>{{ render_supercedes(meta.supercedes)|safe }}</dd>
{%- endif %}
</dl>
{% autoescape false %}
{{ body }}
{% endautoescape %}
{% endblock %}


@@ -0,0 +1,42 @@
=================
Restricted Routes
=================
.. meta::
:author: zzz
:created: 2008-09-14
:thread: http://zzz.i2p/topics/114
:lastupdated: 2008-10-13
:status: Draft
.. contents::
Introduction
============
Thoughts
========
- Add a new transport "IND" (indirect) which publishes a leaseSet hash in the
RouterAddress structure: "IND: [key=aababababababababb]". This transport bids
the lowest priority when the target router publishes it. To send to a peer via
this transport, fetch the leaseset from a ff peer as usual, and send it
directly to the lease.
- A peer advertising IND must build and maintain a set of tunnels to another
peer. These are not exploratory tunnels and not client tunnels, but a second
set of router tunnels.
- 1-hop is sufficient?
- How to select peers for these tunnels?
- They need to be "non-restricted" but how do you know that? Reachability
mapping? Graph theory, algorithms, data structures may help here. Need to
read up on this. See tunnels TODO.
- If you have IND tunnels then your IND transport must bid (low-priority) to
send messages out these tunnels.
- How to decide to enable building indirect tunnels
- How to implement and test without blowing cover


@@ -0,0 +1,37 @@
=========
Multicast
=========
.. meta::
:author: zzz
:created: 2008-12-08
:thread: http://zzz.i2p/topics/172
:lastupdated: 2009-03-25
:status: Draft
.. contents::
Introduction
============
Basic idea: Send one copy through your outbound tunnel, outbound endpoint
distributes to all the inbound gateways. End-end encryption precluded.
Thoughts
========
- New multicast tunnel message type (delivery type = 0x03)
- Outbound endpoint multicast distribute
- New I2NP Multicast Message type ?
- New I2CP Multicast SendMessageMessage Message type
- Don't encrypt router-router in OutNetMessageOneShotJob (garlic?)
App:
- RTSP Proxy?
Streamr:
- Tune MTU? Or just do it at the app?
- On-demand receive & transmit


@@ -0,0 +1,57 @@
=================
Service Directory
=================
.. meta::
:author: zzz
:created: 2009-01-01
:thread: http://zzz.i2p/topics/180
:lastupdated: 2009-01-06
:status: Draft
.. contents::
Introduction
============
This is similar to a proposal Sponge had a while back on IRC. I don't think he
wrote it up, but his idea was to put it in the netDb. I'm not in favor of that,
but the discussion of the best method of accessing the directory (netDb lookups,
DNS-over-i2p, HTTP, hosts.txt, etc.) I will leave for another day.
I could probably hack this up pretty quickly using HTTP and the collection of
perl scripts I use for the add key form.
Directory Interface
===================
Here's how an app would interface with the directory:
REGISTER
- DestKey
- List of Protocol/Service pairs:
- Protocol (optional, default: HTTP)
- Service (optional, default: website)
- ID (optional, default: none)
- Hostname (optional)
- Expiration (default: 1 day? 0 for delete)
- Sig (using privkey for dest)
Returns: success or failure
Updates allowed
LOOKUP
- Hash or key (optional). ONE of:
- 80-bit partial hash
- 256-bit full hash
- full destkey
- Protocol/service pair (optional)
Returns: success, failure, or (for 80-bit) collision.
If success, returns signed descriptor above.
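A very rough sketch of what a REGISTER record could look like in code, assuming JSON
over HTTP as the transport; the proposal deliberately leaves transport, encoding, and
exact field names open, so everything below is illustrative only::

  import json
  from dataclasses import dataclass, field

  # Hypothetical REGISTER record; field names and encoding are assumptions.
  @dataclass
  class RegisterRecord:
      destkey: str                  # full Destination, e.g. Base64
      services: list = field(default_factory=lambda: [
          {"protocol": "HTTP", "service": "website", "id": None}])
      hostname: str = ""
      expiration: int = 86400       # seconds; 0 means delete

      def signable_bytes(self) -> bytes:
          # canonical form that the destination's signing key would sign
          return json.dumps(self.__dict__, sort_keys=True).encode()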


@@ -0,0 +1,37 @@
====================
Bigger I2NP Messages
====================
.. meta::
:author: zzz
:created: 2009-04-05
:thread: http://zzz.i2p/topics/258
:lastupdated: 2009-05-27
:status: Draft
.. contents::
Introduction
============
iMule's use of 12KB datagrams exposed lots of problems. The actual limit today
is more like 10KB.
Thoughts
========
To do:
- Increase NTCP limit - not so easy?
- More session tag quantity tweaks. May hurt max window size? Are there stats to
look at? Make the number variable based on how many we think they need? Can
they ask for more? ask for a quantity?
- Investigate increasing SSU max size (by increasing MTU?)
- Lots of testing
- Finally check in the fragmenter improvements? - Need to do comparison testing
first!


@@ -0,0 +1,26 @@
=============
TLS Transport
=============
.. meta::
:author: zzz
:created: 2009-05-03
:thread: http://zzz.i2p/topics/287
:lastupdated: 2009-05-03
:status: Draft
.. contents::
Introduction
============
It is frequently suggested that we have a snoop-resistant transport
so that we are resistant to fingerprinting and blocking by ISPs and state-level
adversaries, like what Tor has (i.e. tries to look like a Firefox HTTPS
session).
Proposal
========
TBD


@@ -0,0 +1,86 @@
=============================
Name Translation for GarliCat
=============================
.. meta::
:author: Bernhard R. Fischer
:created: 2009-12-04
:thread: http://zzz.i2p/topics/453
:lastupdated: 2009-12-04
:status: Draft
.. contents::
Current Translation Mechanism
=============================
GarliCat (GC) performs name translation for setting up connections to other GC
nodes. This name translation is just a recoding of the binary representation of
an address into the Base32 encoded form. Thus, translation works back and
forth.
Those addresses are chosen to be 80 bits long. This is because Tor uses 80 bit
long values for addressing its hidden services. Thus, OnionCat (which is GC for
Tor) works with Tor without further intervention.
Unfortunately (with respect to this addressing scheme), I2P uses 256 bit long
values for addressing its services. As already mentioned, GC transcodes
between the binary and Base32 encoded forms. Due to the nature of GC being a layer 3
VPN, in its binary representation the addresses are defined to be IPv6
addresses, which have a total length of 128 bits. Obviously, 256 bit long I2P
addresses do not fit into them.
Thus, a second step of name translation becomes necessary:
IPv6 address (binary) -1a-> Base32 address (80 bits) -2a-> I2P address (256 bits)
-1a- ... GC translation
-2a- ... I2P hosts.txt lookup
The current solution is to let the I2P router do the work. This is accomplished
by insertion of the 80 bit Base32 address and its destination (the I2P address)
as a name/value pair into the hosts.txt or privatehosts.txt file of the I2P
router.
This basically works but it depends on a naming service which (IMHO) itself is
in a state of development and not mature enough (especially in respect to name
distribution).
A Scalable Solution
===================
I suggest changing the stages of addressing with respect to I2P (and maybe also
for Tor) so that GC does reverse lookups on the IPv6 addresses using
the regular DNS protocol. The reverse zone shall directly contain the 256 bit
I2P address in its Base32 encoded form. This changes the lookup mechanism to a
single step, thereby adding further advantages.
IPv6 address (binary) -1b-> I2P address (256 bits)
-1b- ... DNS reverse lookup
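A minimal sketch of how GC could form the reverse-lookup query name from an IPv6
address, using the standard nibble-reversed format; the zone suffix "ip6.i2p" is purely
a placeholder, since the proposal does not say where the reverse zone would live::

  import ipaddress

  def reverse_query_name(ipv6_str, zone="ip6.i2p"):
      # standard DNS reverse-mapping nibble format (as for ip6.arpa);
      # the PTR record at this name would hold the Base32-encoded
      # 256 bit I2P destination hash described above
      addr = ipaddress.IPv6Address(ipv6_str)
      nibbles = addr.exploded.replace(":", "")
      return ".".join(reversed(nibbles)) + "." + zone

  # placeholder address; real GC addresses live inside GC's IPv6 prefix
  print(reverse_query_name("fd00::1234"))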
DNS lookups within the Internet are known to be information leaks with respect to
anonymity. Thus, those lookups have to be carried out within I2P. This implies
that several DNS services should be available within I2P. As DNS queries are
usually performed using the UDP protocol, GC itself is needed for data
transport, because it carries UDP packets, which I2P natively does not.
Further advantages are associated with DNS:
1) It is a well-known standard protocol, hence, it is continuously improved and
many tools (clients, servers, libraries,...) exist.
2) It is a distributed system. It supports the name space being hosted on
several servers in parallel by default.
3) It supports cryptography (DNSSEC) which enables authentication of resource
records. This could directly be tied with the keys of a destination.
Future Opportunities
====================
It may be possible that this naming service could also be used for forward
lookups, i.e. translating hostnames into I2P addresses and/or IPv6
addresses. But this kind of lookup needs additional investigation because those
lookups are usually done by the locally installed resolver library which uses
regular Internet name servers (e.g. as specified in /etc/resolv.conf on
Unix-like systems). This is different from the reverse lookups of GC that I
explained above.
A further opportunity could be that the I2P address (destination) gets
registered automatically when creating a GC inbound tunnel. This would greatly
improve the usability.


@@ -0,0 +1,58 @@
================
NTCP Obfuscation
================
.. meta::
:author: zzz
:created: 2010-11-23
:thread: http://zzz.i2p/topics/774
:lastupdated: 2014-01-03
:status: Rejected
:supercededby: 111
.. contents::
Introduction
============
NTCP data is encrypted after the first message (and the first message appears to
be random data), thus preventing protocol identification through "payload
analysis". It is still vulnerable to protocol identification through "flow
analysis". That's because the first 4 messages (i.e. the handshake) are fixed
length (288, 304, 448, and 48 bytes).
By adding random amounts of random data to each of the messages, we can make it
a lot harder.
Modifications to NTCP
=====================
This is fairly heavyweight but it prevents any detection by DPI equipment.
The following data will be added to the end of the 288-byte message 1:
- A 514-byte ElGamal encrypted block
- Random padding
The ElG block is encrypted to Bob's public key. When decrypted to 222 bytes, it
contains:
- 214 bytes random padding
- 4 bytes 0 reserved
- 2 bytes padding length to follow
- 2 bytes protocol version and flags
In messages 2-4, the last two bytes of the padding will now indicate the length
of more padding to follow.
Note that the ElG block does not have perfect forward secrecy but there's
nothing interesting in there.
We could modify our ElG library so it will encrypt smaller data sizes if we
think 514 bytes is way too much? Is ElG encryption for each NTCP setup too much?
Support for this would be advertised in the netdb RouterAddress with the option
"version=2". If only 288 bytes are received in Message 1, Alice is assumed to be
version 1 and no padding is sent in subsequent messages. Note that communication
could be blocked if a MITM fragmented IP to 288 bytes (very unlikely according
to Brandon).


@@ -0,0 +1,68 @@
=========================
BEP9 Information Recovery
=========================
.. meta::
:author: sponge
:created: 2011-02-23
:thread: http://zzz.i2p/topics/860
:lastupdated: 2011-02-23
:status: Draft
.. contents::
Problem
=======
BEP9 does not send the entire torrent file, thus losing several important
dictionary items, and it changes the torrent file's total SHA1. This is bad for
maggot links, and bad because important information is lost. Tracker lists,
comments, and any additional data are gone. A way to recover this information is
important, and it needs to add as little as possible to the torrent file. It
also must not be circularly dependent. Recovery information should not affect
current clients in any way. Torrents that are trackerless (the tracker URL is
literally 'trackerless') do not contain the extra field, as they are specific to
using the maggot protocol for discovery and download, which never loses
the information in the first place.
Solution
========
All that needs to be done is to compress the information that would be lost, and
store it in the info dictionary.
Implementation
--------------
1. Generate the normal info dictionary.
2. Generate the main dictionary, and leave out the info entry.
3. Bencode the main dictionary and compress it with gzip.
4. Add the compressed main dictionary to the info dictionary.
5. Add info to the main dictionary.
6. Write the torrent file.
Recovery
--------
1. Decompress the recovery entry in the info dict.
2. Bendecode the recovery entry.
3. Add info to the recovered dictionary.
4. For maggot-aware clients, you can now verify that the SHA1 is correct.
5. Write out the recovered torrent file.
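A small sketch of the creation and recovery steps above, using gzip plus the
third-party bencodepy library; the key name "recovery" is an assumption, as the
proposal does not name the new info-dictionary entry::

  import gzip
  import bencodepy  # third-party bencoding library, assumed for illustration

  def add_recovery_entry(main_dict, info_dict):
      # steps 2-5: bencode the main dict (without "info"), gzip it,
      # store it in the info dict, then attach info to the main dict
      stripped = {k: v for k, v in main_dict.items() if k != b"info"}
      info_dict[b"recovery"] = gzip.compress(bencodepy.encode(stripped))
      main_dict[b"info"] = info_dict
      return main_dict              # step 6: caller bencodes and writes the file

  def recover_original(info_dict):
      # recovery steps 1-3: decompress, bendecode, re-attach the info dict
      recovered = bencodepy.decode(gzip.decompress(info_dict[b"recovery"]))
      recovered[b"info"] = info_dict
      return recovered              # steps 4-5: verify the SHA1, write the file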
Discussion
==========
Using the above-outlined method, the increase in torrent size is very small;
200 to 500 bytes is typical. Robert will ship with the new info
dictionary entry creation, and it will not be possible to turn it off. Here is the
structure::
main dict {
Tracker strings, comments, etc...
info : {
gzipped main bencoded dict minus the info dictionary and all other
usual info
}
}


@@ -0,0 +1,30 @@
===========================
Multipath TCP for Streaming
===========================
.. meta::
:author: hottuna
:created: 2012-08-26
:thread: http://zzz.i2p/topics/1221
:lastupdated: 2012-08-26
:status: Draft
.. contents::
Introduction
============
Client tunnels are used by the streaming lib in a fairly standard TCP manner.
It would be preferable to allow a multipath TCP-like solution, where client
tunnels are used based on individual characteristics like:
- Latency
- Capacity
- Availability
Proposal
========
TBD


@@ -0,0 +1,35 @@
============
PT Transport
============
.. meta::
:author: zzz
:created: 2014-01-09
:thread: http://zzz.i2p/topics/1551
:lastupdated: 2014-09-28
:status: Draft
.. contents::
Introduction
============
The general idea is to use Pluggable Transports (PTs) as an I2P transport for
communication between routers. It would be an easy way to experiment with
alternative protocols, and get ready for I2P blocking resistance.
Thoughts
========
There are a few potential layers of implementation:
1. A generic PT that implements SOCKS and ExtORPort and configures and forks the
in and out processes, and registers with the comm system. This layer knows
nothing about NTCP, and it may or may not use NTCP. Good for testing.
2. Building on 1), a generic NTCP PT that builds on the NTCP code and funnels
NTCP to 1).
3. Building on 2), a specific NTCP-xxxx PT configured to run a given external in
and out process.


@@ -0,0 +1,46 @@
==========
LeaseSet 2
==========
.. meta::
:author: zzz
:created: 2014-01-22
:thread: http://zzz.i2p/topics/1560
:lastupdated: 2016-04-04
:status: Draft
.. contents::
Introduction
============
The end-to-end cryptography used through I2P tunnels has separate encryption and
signing keys. The signing keys are in the tunnel Destination, which has already
been extended with KeyCertificates to support newer signature types. However,
the encryption keys are part of the LeaseSet, which doesn't contain any
Certificates. It is therefore necessary to implement a new LeaseSet format, and
add support for storing it in the netDb.
A silver lining is that once LS2 is implemented, all existing Destinations can
make use of more modern encryption types; routers that can fetch and read a LS2
will be guaranteed to have support for any encryption types introduced alongside
it.
Format
======
The basic LS2 format would be like this:
- dest
- published timestamp (8 bytes)
- expires (8 bytes)
- subtype (1 byte) (regular, encrypted, meta, or service)
- flags (2 bytes)
- subtype-specific part:
- encryption type, encryption key, and leases for regular
- blob for encrypted
- properties, hashes, ports, revocations, etc. for service
- signature


@@ -0,0 +1,209 @@
======
NTCP 2
======
.. meta::
:author: zzz
:created: 2014-02-13
:thread: http://zzz.i2p/topics/1577
:lastupdated: 2014-09-21
:status: Draft
:supercedes: 106
.. contents::
Introduction
============
NTCP data is encrypted after the first message (and the first message appears to
be random data), thus preventing protocol identification through "payload
analysis". It is still vulnerable to protocol identification through "flow
analysis". That's because the first 4 messages (i.e. the handshake) are fixed
length (288, 304, 448, and 48 bytes).
By adding random amounts of random data to each of the messages, we can make it
a lot harder.
Goals
=====
- Support NTCP 1 and 2 on a single port, auto-detect.
- Add random padding to all NTCP messages including handshake and data messages
(i.e. length obfuscation so all messages aren't a multiple of 16 bytes)
- Obfuscate the contents of messages that aren't encrypted (1 and 2), sufficiently
so that DPI boxes can't classify them. Also ensure that the messages going to
a single peer or set of peers do not have a similar pattern of bits
- Fix loss of bits in DH due to Java format (ticket #1112)
- Add "probing resistance" (as Tor calls it), this includes replay resistance
- Add options/version in handshake for future extensibility
- Add resistance to malicious MitM segmentation if possible
- Don't add significantly to CPU required for connection setup
- Minimize changes
Router Address
==============
Transport identifier is "NTCP" as before.
Routers would publish "ver=1,2" in the Router Address (not the Router Info)
if they support both NTCP 1 and NTCP 2 on the same port.
That's what we would do in Java.
"ver=1" is NTCP 1 only. This is the default if no "ver" is present.
"ver=2" is NTCP 2 only. This can't be used for a long time, as it's not
backwards-compatible. But sometime in the future, implementers could
support version 2 only.
Alternative: Make it something easier to parse, where it's the integer
representation of a bitfield. ver=3 means you support version 1 and 2.
ver=7 means you support versions 1, 2, and 3.
Messages
========
1) Session Request
------------------
Message 1 is obfuscated with random padding,
and the options block is AES-encrypted with Bob's (publicly known) router hash
as a cheap form of obfuscation.
There is no requirement that the session request be unbreakably encrypted,
e.g. with Bob's encryption key, as there's nothing secret in here and that would be
too expensive.
current:
- 256 byte X
- 32 byte H(x) ^ H(RI)
proposed:
- 16 byte MAC
- 16 byte AES-encrypted options block
- 1 byte protocol version (2)
- 3 bytes options (nothing now, all 0)
- 2 byte DH type (implies length of X)
0. Old ElG with leading zero (256 bytes) (unused in NTCP 2)
1. New ElG without leading zero (256 bytes)
2. ECDH? 25519?
- 2 byte block/stream cipher type
0. AES CBC
1. Salsa20? ChaCha?
- 4 byte timestamp (seconds since epoch, wrap around in 2038)
- 2 bytes unused, set to 0
- 2 byte padding count beyond X, to a minimum packet size of 289 bytes
- DH X (256 bytes or as implied by DH type)
- Random padding bytes as specified, to a minimum of 289 bytes.
No requirement for total message size to be a multiple of 16.
Options block is AES ECB encrypted with Bob's 32-byte router hash as the key.
This is the only portion of the message that is encrypted.
MAC: Standard 16-byte HMAC-MD5 (not the nonstandard one we use in SSU)
MAC covers only the options block.
MAC key is the first 16 bytes of Bob's router hash.
Encrypt-then-MAC.
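As a sketch only (field packing, byte order, and library choices here are assumptions,
and the message format above is itself not final), the proposed 16-byte options block
and its MAC could be built roughly like this::

  import hashlib, hmac, struct, time
  from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

  def build_options_block(dh_type=1, cipher_type=0, pad_count=0):
      # version, 3 option bytes, DH type, cipher type, timestamp,
      # 2 unused bytes, padding count beyond X -- 16 bytes total
      return struct.pack(">B3sHHIHH", 2, b"\x00\x00\x00", dh_type, cipher_type,
                         int(time.time()) & 0xFFFFFFFF, 0, pad_count)

  def build_message1_prefix(bob_router_hash, pad_count):
      # AES-ECB encrypt the single-block options field with Bob's 32-byte
      # router hash as the key, then HMAC-MD5 it with the first 16 bytes
      # of that hash (encrypt-then-MAC), as described above
      options = build_options_block(pad_count=pad_count)
      enc = Cipher(algorithms.AES(bob_router_hash), modes.ECB()).encryptor()
      enc_options = enc.update(options) + enc.finalize()
      mac = hmac.new(bob_router_hash[:16], enc_options, hashlib.md5).digest()
      return mac + enc_options      # DH X and the random padding follow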
To determine if incoming message is version 1 or version 2:
Method 1
Read 32 bytes.
If the MAC is good then assume it is version 2, otherwise it is version 1.
There's a tiny chance the MAC could be good but it's really version 1.
Method 2
Read 288 bytes.
If there is a 289th byte pending, assume it is version 2, otherwise it is version 1.
This method is vulnerable to MiTM segmentation at 288 bytes.
Timestamp is used for replay detection. Keep a cache of recent MACs for a time period,
reject duplicates, and reject timestamps beyond the cache lifetime or too far in future.
2) Session Created
------------------
The only change is adding a variable amount of padding at the end.
TODO: Replace this with the full spec
- Y type and length as specified in message 1
- The last 16 bytes of Y are used as the IV.
- Take the (former) first two padding bytes and make them the number
of padding bytes to follow, 0 - 65535
- Padding up to the first multiple of 16 (0-15 bytes) is required and encrypted.
- Padding after that is not encrypted, not used for next IV,
no requirement for total message size to be a multiple of 16.
- The last 16 encrypted bytes are used as the next IV in message 4
3) Session Confirm A
--------------------
The only change is adding a variable amount of padding at the end.
TODO: Replace this with the full spec
- The last 16 bytes of X from message 1 are used as the IV.
- Take the (former) first two padding bytes and make them the number
of padding bytes to follow after the sig, 0 - 65535
- Then pad with 0-15 bytes so that the message through the signature is a multiple of 16 bytes.
- Then the signature
- Padding after that is not encrypted, not used for next IV,
no requirement for total message size to be a multiple of 16.
- The last 16 encrypted bytes are used as the next IV in the first data transfer.
4) Session Confirm B
--------------------
The only change is adding a variable amount of padding at the end.
TODO: Replace this with the full spec
- The last 16 bytes of the encrypted contents of message 2 are used as the IV.
- Take the (former) first two padding bytes and make them the number
of padding bytes to follow, 0 - 65535
- Padding up to the first multiple of 16 (0-15 bytes) is required and encrypted.
- Padding after that is not encrypted, not used for next IV,
no requirement for total message size to be a multiple of 16.
- The last 16 encrypted bytes are used as the next IV in the first data transfer.
5) Data Packets
---------------
Add non-mod-16 padding after the checksum:
- Old:
- 2 byte data length
- Data
- Padding to multiple of 16 (including checksum)
- 4 byte checksum
- New:
- 2 byte data length
- Data
- 2 byte post-checksum padding count, 0-65535
- 0-15 bytes Padding to multiple of 16 (including checksum)
- 4 byte checksum
- Random Padding (unencrypted, not used in IV, not covered by checksum)
Alternatives
============
- Poly1305 instead of HMAC-MD5?
- Something else instead of AES for obfuscating the options block in message 1?
- ECDH or 25519 DH instead of ElG DH?
- Salsa20 (or derivatives) instead of AES?
When we add support for any new DH or block/stream cipher types,
we will have to bump the advertised version in the Router Address.


@@ -0,0 +1,28 @@
========================
LeaseSet Key Persistence
========================
.. meta::
:author: zzz
:created: 2014-12-13
:thread: http://zzz.i2p/topics/1770
:lastupdated: 2014-12-13
:status: Draft
.. contents::
Introduction
============
In 0.9.17 persistence was added for the netDb slicing key, stored in
i2ptunnel.config. This helps prevent some attacks by keeping the same slice
after restart, and it also prevents possible correlation with a router restart.
There are two other things that are even easier to correlate with a router restart:
the leaseset encryption and signing keys. These are not currently persisted.
Proposed Changes
================
TBD


@@ -0,0 +1,28 @@
==========================
'Encrypted' Streaming Flag
==========================
.. meta::
:author: orignal
:created: 2015-01-21
:thread: http://zzz.i2p/topics/1795
:lastupdated: 2015-01-21
:status: Draft
.. contents::
Introduction
============
Highly loaded apps can encounter a shortage of ElGamal/AES+SessionTags tags.
Proposal
========
Add a new flag somewhere within the streaming protocol. If a packet comes with
this flag, it means the payload is AES encrypted with a key derived from the local
private key and the peer's public key. That would allow eliminating garlic
(ElGamal/AES) encryption and the tag-shortage problem.
It may be set per packet, or per stream through the SYN.


@@ -0,0 +1,45 @@
====================================
Batch Multiple Data Cloves in Garlic
====================================
.. meta::
:author: orignal
:created: 2015-01-22
:thread: http://zzz.i2p/topics/1797
:lastupdated: 2015-01-22
:status: Draft
.. contents::
Introduction
============
Required Changes
================
The changes would be in OCMOSJ and related helper classes, and in
ClientMessagePool. As there is no queue now, a new queue and some delay would be
necessary. Any batching would have to honor a max garlic size to minimize
dropping. Perhaps 3KB? Would want to instrument things first to measure how
often this would get used.
This is backward-compatible, as the garlic receiver will already process all
cloves it receives.
Thoughts
========
It is unclear whether this will have any useful effect, as streaming already
does batching and selects optimum MTU. Batching would increase message size and
exponential drop probability.
The exception is uncompressed content, gzipped at the I2CP layer. But HTTP
traffic is already compressed at a higher layer, and Bittorrent data is usually
incompressible. What does this leave? i2pd doesn't currently do the x-i2p-gzip
compression, so it may help there a lot more. But the stated goal of not running out
of tags is better addressed with a proper windowing implementation in the i2pd
streaming library.


@@ -0,0 +1,60 @@
=================================
Prefer Nearby Routers in Keyspace
=================================
.. meta::
:author: chisquare
:created: 2015-04-25
:thread: http://zzz.i2p/topics/1874
:lastupdated: 2015-04-25
:status: Draft
.. contents::
Introduction
============
This is an idea to improve tunnel build success, by organizing peers so that
they prefer connecting to other peers that are close to them in keyspace.
Required Changes
================
This change would require:
1. Every router prefer connections near it in the keyspace.
2. Every router be aware that every other router prefers connections near it in
the keyspace.
Advantages for Tunnel Building
==============================
If you build a tunnel::
A -long-> B -short-> C -short-> D
(long/random vs. short hop in keyspace), you can guess where the tunnel build
probably failed and try a different peer at that point. In addition, it would
allow you to detect denser parts of the keyspace and have routers simply not use
them, since they may be someone colluding.
If you build a tunnel::
A -long-> B -long-> C -short-> D
and it fails, you can infer that it was more likely failing at C -> D and you
can choose another D hop.
You can also build tunnels so that the OBEP is close to a given IBGW in the
keyspace, and use the tunnels whose OBEP is closest to the IBGW in a LeaseSet.
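A minimal sketch of the keyspace comparison this relies on, assuming the usual XOR
metric over 32-byte router hashes (the proposal does not pin down the exact metric)::

  import hashlib

  def keyspace_distance(hash_a, hash_b):
      # XOR distance between two 32-byte router hashes, as an integer
      return int.from_bytes(bytes(a ^ b for a, b in zip(hash_a, hash_b)), "big")

  def prefer_nearby(my_hash, candidates):
      # order candidate peers so the ones closest in keyspace come first
      return sorted(candidates, key=lambda h: keyspace_distance(my_hash, h))

  me = hashlib.sha256(b"my-router").digest()
  peers = [hashlib.sha256(x).digest() for x in (b"peer-a", b"peer-b", b"peer-c")]
  print([p.hex()[:8] for p in prefer_nearby(me, peers)])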
Security Implications
=====================
If you randomize the placement of short vs long hops in the keyspace, an
attacker probably won't get much of an advantage.
The biggest downside though is it may make user enumeration a bit easier.


@@ -0,0 +1,31 @@
============================
Opt-in Statistics Collection
============================
.. meta::
:author: zab
:created: 2015-11-04
:thread: http://zzz.i2p/topics/1981
:lastupdated: 2015-11-04
:status: Draft
.. contents::
Introduction
============
Currently there are several network parameters which have been determined by
educated guessing. It is suspected that some of those can be tweaked to improve
the overall performance of the network in terms of speed, reliability and so on.
However, changing them without proper research is very risky.
Proposal
========
The router supports a vast collection of stats which can be used to analyze
network-wide properties. What we need is an automated reporting system which
collects those stats in a centralized place. Naturally, this would be opt-in, as
it pretty much destroys anonymity. (The privacy-friendly stats are already
reported to stats.i2p.) As a ballpark figure, for a network of size 30,000 a
sample of 300 reporting routers should be representative enough.


@@ -0,0 +1,458 @@
================
I2PControl API 2
================
.. meta::
:author: hottuna
:created: 2016-01-23
:thread: http://zzz.i2p/topics/2030
:lastupdated: 2016-02-01
:status: Draft
.. contents::
Introduction
============
This page will outline the API 2 for i2pcontrol.
Developer heads-up!
-------------------
All RPC parameters will now be lower case. This *will* break backwards
compatibility with API1 implementations. The reason for this is to provide
users of >=API2 with the simplest, most coherent API possible.
API 2
=====
.. raw:: html
{% highlight lang='json' -%}
{
    "id": "id",
    "method": "method_name",
    "params": {
        "token": "auth_token",
        "method_param": "method_parameter_value"
    },
    "jsonrpc": "2.0"
}
{
    "id": "id",
    "result": "result_value",
    "jsonrpc": "2.0"
}
{% endhighlight %}
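For illustration, a request in this format could be sent from Python roughly as follows;
the URL, port, and TLS handling are assumptions and depend on how the particular
i2pcontrol implementation is configured::

  import json, ssl, urllib.request

  def i2pcontrol_call(method, params, url="https://localhost:7650/"):
      # disable certificate checks for the typical self-signed i2pcontrol
      # certificate -- for illustration only, not a recommendation
      ctx = ssl.create_default_context()
      ctx.check_hostname = False
      ctx.verify_mode = ssl.CERT_NONE
      payload = {"id": "1", "method": method, "params": params, "jsonrpc": "2.0"}
      req = urllib.request.Request(url, data=json.dumps(payload).encode(),
                                   headers={"Content-Type": "application/json"})
      with urllib.request.urlopen(req, context=ctx) as resp:
          return json.loads(resp.read())["result"]

  # e.g. obtain a token first, then pass it with every other call
  # token = i2pcontrol_call("authenticate", {"password": "your-password"})["token"]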
Parameters
----------
"id"
The id number of the request.
Used to identify which reply was spawned by which request.
"method_name"
The name of the RPC that is being invoked.
"auth_token"
The session authentication token.
Needs to be supplied with every RPC except for the 'authenticate' call.
"method_parameter_value"
The method parameter.
Used to offer different flavors of a method, such as 'get' and 'set'.
"result_value"
The value that the RPC returns. Its type and contents depend on the method
and its parameters.
Prefixes
--------
The RPC naming scheme is similar to how it's done in CSS, with vendor prefixes
for the different API implementations (i2p, kovri, i2pd)::
XXX.YYY.ZZZ
i2p.XXX.YYY.ZZZ
i2pd.XXX.YYY.ZZZ
kovri.XXX.YYY.ZZZ
The overall idea with vendor-specific prefixes is to allow for some wiggle room
and let implementations innovate without having to wait for every other
implementation to catch up. If an RPC is implemented by all implementations, its
multiple prefixes can be removed and it can be included as a core RPC in the
next API version.
Method reading guide
--------------------
* **rpc.method**
* *parameter* [type of parameter]: [null], [number], [string], [boolean],
[array] or [object]. [object] being a {key:value} map.
::
"return_value" [string] // This is the value returned by the RPC call
Methods
-------
* **authenticate** - Given that a correct password is provided, this method provides you with a token for further access and a list of supported API levels.
* *password* [string]: The password for this i2pcontrol implementation
::
[object]
{
"token" : [string], // The token to be used be supplied with all other RPC methods
"api" : [[int],[int], ...] // A list of supported API levels.
}
* **control.** - Control i2p
* **control.reseed** - Start reseeding
* [nil]: No parameter needed
::
[nil]
* **control.restart** - Restart i2p instance
* [nil]: No parameter needed
::
[nil]
* **control.restart.graceful** - Restart i2p instance gracefully
* [nil]: No parameter needed
::
[nil]
* **control.shutdown** - Shut down i2p instance
* [nil]: No parameter needed
::
[nil]
* **control.shutdown.graceful** - Shut down i2p instance gracefully
* [nil]: No parameter needed
::
[nil]
* **control.update.find** - **BLOCKING** Search for signed updates
* [nil]: No parameter needed
::
true [boolean] // True iff signed update is available
* **control.update.start** - Start update process
* [nil]: No parameter needed
::
[nil]
* **i2pcontrol.** - Configure i2pcontrol
* **i2pcontrol.address** - Get/Set the ip address that i2pcontrol listens to.
* *get* [null]: This parameter does not need to be set.
::
"0.0.0.0" [string]
* *set* [string]: This will be an ip address like "0.0.0.0" or "192.168.0.1"
::
[nil]
* **i2pcontrol.password** - Change the i2pcontrol password.
* *set* [string]: Set the new password to this string
::
[nil]
* **i2pcontrol.port** - Get/Set the port that i2pcontrol listens to.
* *get* [null]: This parameter does not need to be set.
::
7650 [number]
* *set* [number]: Change the port that i2pcontrol listens to to this port
::
[nil]
* **settings.** - Get/Set i2p instance settings
* **settings.advanced** - Advanced settings
* *get* [string]: Get the value of this setting
::
"setting-value" [string]
* *getAll* [null]:
::
[object]
{
"setting-name" : "setting-value", [string]
".." : ".."
}
* *set* [string]: Set the value of this setting
* *setAll* [object] {"setting-name" : "setting-value", ".." : ".." }
::
[nil]
* **settings.bandwidth.in** - Inbound bandwidth settings
* **settings.bandwidth.out** - Outbound bandwidth settings
* *get* [nil]: This parameter does not need to be set.
::
0 [number]
* *set* [number]: Set the bandwidth limit
::
[nil]
* **settings.ntcp.autoip** - Get IP auto detection setting for NTCP
* *get* [null]: This parameter does not need to be set.
::
true [boolean]
* **settings.ntcp.hostname** - Get NTCP hostname
* *get* [null]: This parameter does not need to be set.
::
"0.0.0.0" [string]
* *set* [string]: Set new hostname
::
[nil]
* **settings.ntcp.port** - NTCP port
* *get* [null]: This parameter does not need to be set.
::
0 [number]
* *set* [number]: Set new NTCP port.
::
[nil]
* *set* [boolean]: Set NTCP IP auto detection
::
[nil]
* **settings.ssu.autoip** - Configure IP auto detection setting for SSU
* *get* [nil]: This parameter does not need to be set.
::
true [boolean]
* **settings.ssu.hostname** - Configure SSU hostname
* *get* [null]: This parameter does not need to be set.
::
"0.0.0.0" [string]
* *set* [string]: Set new SSU hostname
::
[nil]
* **settings.ssu.port** - SSU port
* *get* [null]: This parameter does not need to be set.
::
0 [number]
* *set* [number]: Set new SSU port.
::
[nil]
* *set* [boolean]: Set SSU IP auto detection
::
[nil]
* **settings.share** - Get bandwidth share percentage
* *get* [null]: This parameter does not need to be set.
::
0 [number] // Bandwidth share percentage (0-100)
* *set* [number]: Set bandwidth share percentage (0-100)
* **settings.upnp** - Enable or disable UPNP
* *get* [nil]: This parameter does not need to be set.
::
true [boolean]
* *set* [boolean]: Enable or disable UPnP
::
[nil]
* **stats.** - Get stats from the i2p instance
* **stats.advanced** - This method provides access to all stats kept within the instance.
* *get* [string]: Name of the advanced stat to be provided
* *Optional:* *period* [number]: The period for the requested stat
* **stats.knownpeers** - Returns the number of known peers
* **stats.uptime** - Returns the time in ms since the router started
* **stats.bandwidth.in** - Returns the inbound bandwidth (ideally for the last second)
* **stats.bandwidth.in.total** - Returns the number of bytes received since last restart
* **stats.bandwidth.out** - Returns the outbound bandwidth (ideally for the last second)
* **stats.bandwidth.out.total** - Returns the number of bytes sent since last restart
* **stats.tunnels.participating** - Returns the number of tunnels participated in currently
* **stats.netdb.peers.active** - Returns the number of peers we've recently communicated with
* **stats.netdb.peers.fast** - Returns the number of 'fast' peers
* **stats.netdb.peers.highcapacity** - Returns the number of 'high capacity' peers
* **stats.netdb.peers.known** - Returns the number of known peers
* *get* [null]: This parameter does not need to be set.
::
0.0 [number]
* **status.** - Get i2p instance status
* **status.router** - Get router status
* *get* [null]: This parameter does not need to be set.
::
"status" [string]
* **status.net** - Get router network status
* *get* [null]: This parameter does not need to be set.
::
0 [number]
/**
* 0 OK
* 1 TESTING
* 2 FIREWALLED
* 3 HIDDEN
* 4 WARN_FIREWALLED_AND_FAST
* 5 WARN_FIREWALLED_AND_FLOODFILL
* 6 WARN_FIREWALLED_WITH_INBOUND_TCP
* 7 WARN_FIREWALLED_WITH_UDP_DISABLED
* 8 ERROR_I2CP
* 9 ERROR_CLOCK_SKEW
* 10 ERROR_PRIVATE_TCP_ADDRESS
* 11 ERROR_SYMMETRIC_NAT
* 12 ERROR_UDP_PORT_IN_USE
* 13 ERROR_NO_ACTIVE_PEERS_CHECK_CONNECTION_AND_FIREWALL
* 14 ERROR_UDP_DISABLED_AND_TCP_UNSET
*/
* **status.isfloodfill** - Is the i2p instance currently a floodfill
* *get* [null]: This parameter does not need to be set.
::
true [boolean]
* **status.isreseeding** - Is the i2p instance currently reseeding
* *get* [null]: This parameter does not need to be set.
::
true [boolean]
* **status.ip** - Public IP detected of this i2p instance
* *get* [null]: This parameter does not need to be set.
::
"0.0.0.0" [string]


@@ -0,0 +1,58 @@
=====================
Bidirectional Tunnels
=====================
.. meta::
:author: orignal
:created: 2016-01-07
:thread: http://zzz.i2p/topics/2041
:lastupdated: 2016-01-07
:status: Draft
.. contents::
Introduction
============
i2pd is going to introduce bi-directional tunnels, built through other i2pd
routers only for now. To the rest of the network they will appear as regular
inbound and outbound tunnels.
Goals
=====
1. Reduce network and CPU usage by reducing the number of TunnelBuild messages
2. Ability to know instantly if a participant has gone away.
3. More accurate profiling and stats
4. Use other darknets as intermediate peers
Tunnel modifications
====================
TunnelBuild
-----------
Tunnels are built the same way as inbound tunnels. No reply message is required.
There is a special type of participant called an "entrance", marked by a flag, serving
as IBGW and OBEP at the same time. The message has the same format as
VariableTunnelBuild, but the cleartext contains different fields::
in_tunnel_id
out_tunnel_id
in_next_tunnel_id
out_next_tunnel_id
in_next_ident
out_next_ident
layer_key, iv_key
It will also contain a field indicating which darknet the next peer belongs to, and
some additional information if it's not I2P.
TunnelTermination
-----------------
If a peer wants to go away, it creates a TunnelTermination message, encrypts it with
its layer key, and sends it in the "in" direction. If a participant receives such a
message, it encrypts it again with its own layer key and sends it to the next peer.
Once a message reaches the tunnel owner, it starts decrypting peer-by-peer until it
gets the unencrypted message. It then finds out which peer has gone away and
terminates the tunnel.


@@ -0,0 +1,64 @@
=============================
Meta-LeaseSet for Multihoming
=============================
.. meta::
:author: zzz
:created: 2016-01-09
:thread: http://zzz.i2p/topics/2045
:lastupdated: 2016-01-11
:status: Draft
.. contents::
Introduction
============
Multihoming is a hack and presumably won't work for e.g. facebook.i2p at scale.
Say we had 100 multihomes each with 16 tunnels, that's 1600 LS publishes every
10 minutes, or almost 3 per second. The floodfills would get overwhelmed and
throttles would kick in. And that's before we even mention the lookup traffic.
We need some sort of meta-LS, where the LS lists the 100 real LS hashes. This
would be long-lived, a lot longer than 10 minutes. So it's a two-stage lookup
for the LS, but the first stage could be cached for hours.
Format
======
::
- Destination
- Published Time stamp
- Expiration
- Flags
- Properties
- Number of entries
- Number of revocations
- Entries. Each entry contains:
- Hash
- Flags
- Expiration
- Cost (priority)
- Properties
- Revocations. Each revocation contains:
- Hash
- Flags
- Expiration
- Signature
Flags and properties are included for maximum flexibility.
Comments
========
This could then be generalized to be a service lookup of any type. The service
identifier is a SHA256 hash.
For even more massive scalability, we could have multiple levels, i.e. a meta-LS
could point to other meta-LSes.


@@ -0,0 +1,68 @@
==================
Encrypted LeaseSet
==================
.. meta::
:author: zzz
:created: 2016-01-11
:thread: http://zzz.i2p/topics/2047
:lastupdated: 2016-01-12
:status: Draft
.. contents::
Introduction
============
Current encrypted LS is horrendous and insecure. I can say that, I designed and
implemented it.
Reasons:
- AES CBC encrypted
- Single AES key for everybody
- Lease expirations still exposed
- Encryption pubkey still exposed
Goals
=====
- Make entire thing opaque
- Keys for each recipient
Strategy
========
Do like GPG/OpenPGP does. Asymmetrically encrypt a symmetric key for each
recipient. Data is decrypted with that symmetric key. See e.g. [RFC-4880-S5.1]_
IF we can find an algo that's small and fast.
LS2 contents
------------
- Destination
- Published timestamp
- Expiration
- Flags
- Length of data
- Encrypted data
- Signature
Encrypted data could be prefixed with some enctype specifier, or not.
Trick is finding an asymmetric encryption that's small and fast. ElGamal at 514
bytes is a little painful here. We can do better.
See e.g. http://security.stackexchange.com/questions/824...
This works for small numbers of recipients (or actually, keys; you can still
distribute keys to multiple people if you like).
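A rough sketch of the per-recipient key wrapping described above; the specific
primitives (X25519, HKDF, AES-GCM via the Python cryptography package) are
illustrative stand-ins, since choosing a small and fast algorithm is exactly the
open question::

  import os
  from cryptography.hazmat.primitives import hashes
  from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey
  from cryptography.hazmat.primitives.ciphers.aead import AESGCM
  from cryptography.hazmat.primitives.kdf.hkdf import HKDF

  def encrypt_leaseset(plaintext, recipient_pubkeys):
      # one random data key encrypts the leaseset body ...
      data_key = AESGCM.generate_key(bit_length=256)
      nonce = os.urandom(12)
      blob = nonce + AESGCM(data_key).encrypt(nonce, plaintext, None)
      # ... and is wrapped separately to each recipient's public key
      wrapped = []
      for pub in recipient_pubkeys:
          eph = X25519PrivateKey.generate()
          kek = HKDF(hashes.SHA256(), 32, None, b"ls2-demo").derive(eph.exchange(pub))
          n2 = os.urandom(12)
          wrapped.append((eph.public_key(), n2 + AESGCM(kek).encrypt(n2, data_key, None)))
      return wrapped, blob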
References
==========
.. [RFC-4880-S5.1]
https://tools.ietf.org/html/rfc4880#section-5.1


@@ -0,0 +1,64 @@
==============
Service Lookup
==============
.. meta::
:author: zzz
:created: 2016-01-13
:thread: http://zzz.i2p/topics/2048
:lastupdated: 2016-01-13
:status: Draft
.. contents::
Introduction
============
This is the full-monty bombastic anything-goes-in-the-netdb proposal. AKA
anycast. This would be the 4th proposed LS2 subtype.
Say you wanted to advertise your destination as an outproxy, or a GNS node, or a
Tor gateway, or a Bittorrent DHT or imule or i2phex or Seedless bootstrap, etc.
You could store this information in the netDB instead of using a separate
bootstrapping or information layer.
There's nobody in charge so unlike with massive multihoming, you can't have a
signed authoritative list. So you would just publish your record to a floodfill.
The floodfill would aggregate these and send them as a response to queries.
Example
=======
Say your service was "GNS". You would send a database store to the floodfill:
- Hash of "GNS"
- destination
- publish timestamp
- expiration (0 for revocation)
- port
- signature
When somebody did a lookup, they would get back a list of those records:
- Hash of "GNS"
- Floodfill's hash
- Timestamp
- number of records
- List of records
- signature of floodfill
Expirations would be relatively long, hours at least.
Considerations
==============
The downside is that this could turn into the Bittorrent DHT or worse. At a
minimum, the floodfills would have to severely rate- and capacity-limit the
stores and queries. We could whitelist approved service names for higher limits.
We could also ban non-whitelisted services completely.
Of course, even today's netDB is open to abuse. You can store arbitrary data in
the netDB, as long as it looks like a RI or LS and the signature verifies. But
this would make it a lot easier.


@@ -0,0 +1,413 @@
=================
New netDB Entries
=================
.. meta::
:author: zzz
:created: 2016-01-16
:thread: http://zzz.i2p/topics/2051
:lastupdated: 2016-01-16
:status: Draft
.. contents::
Introduction
============
This is an update and aggregation of the following 4 proposals:
- LS2
- Encrypted LS2
- Meta LS2 for massive multihoming
- Unauthenticated service lookup (anycasting)
These proposals are mostly independent, but for sanity we define and use a
common format for several of them.
Proposal
========
This proposal defines 5 new DatabaseEntry types and the process for
storing them to and retrieving them from the network database,
as well as the method for signing them and verifying those signatures.
Justification
-------------
LS2 adds fields for changing encryption type and for future protocol changes.
Encrypted LS2 fixes several security issues with the existing encrypted LS by
using asymmetric encryption of the entire set of leases.
Meta LS2 provides flexible, efficient, effective, and large-scale multihoming.
Service Record and Service List provide anycast services such as naming lookup
and DHT bootstrapping.
Existing types:
0: RI
1: LS
New types:
2: LS2
3: Encrypted LS2
4: Meta LS2
5: Service Record
6: Service List
Lookup/Store process
--------------------
Types 2-4 may be returned in response to a standard leaseset lookup (type 1).
Type 5 is never returned in response to a lookup.
Type 6 is returned in response to a new service lookup type (type 2).
Format
------
Types 2-5 all have a common format::
Standard LS2 Header:
- Destination (387+ bytes)
- Published timestamp (8 bytes)
- Expires (4 bytes) (offset from published in ms)
- Flags (2 bytes) (see details for each type below)
Type-Specific Part
- as defined below
Standard LS2 Signature:
- Signature (40+ bytes)
Type 6 (Service List) is an aggregation of several Service Records and has a
different format. See below.
New DatabaseEntry types
=======================
LeaseSet 2
----------
Changes from existing LeaseSet:
- Add published timestamp, expires timestamp, flags, and properties
- Add encryption type
- Remove revocation key
Lookup with:
Standard LS flag (1)
Store with:
Standard LS2 type (2)
Typical expiration:
10 minutes, as in a regular LS.
Published by:
Destination
Format
``````
::
Standard LS2 Header:
- Destination (387+ bytes)
- Published timestamp (8 bytes)
- Expires (4 bytes) (offset from published in ms)
- Flags (2 bytes)
Standard LS2 Type-Specific Part
- Encryption type (2 bytes)
- Encryption key (256 bytes or depending on enc type)
- Number of leases (1 byte)
- Leases (44 bytes each)
- Properties (2 bytes if none)
Standard LS2 Signature:
- Signature (40+ bytes)
Flag definition::
Bit order: 15 14 ... 2 1 0
Bit 0: If 0, a standard published leaseset.
If 1, an unpublished leaseset. Should not be flooded, published, or
sent in response to a query. If this leaseset expires, do not query the
netdb for a new one.
Bits 1-15: Unused, set to 0 for compatibility with future uses.
Properties is for future use, no current plans.
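A small sketch of the fixed header fields that follow the Destination, and of the flag
test; big-endian packing is assumed here, matching other I2P structures::

  import struct, time

  def pack_ls2_header_tail(published_ms, expires_offset_ms, flags):
      # 8-byte published timestamp, 4-byte expires offset (ms), 2-byte flags
      return struct.pack(">QIH", published_ms, expires_offset_ms, flags)

  def is_unpublished(flags):
      # bit 0 set: unpublished leaseset -- do not flood, publish, or re-query
      return bool(flags & 0x0001)

  hdr = pack_ls2_header_tail(int(time.time() * 1000), 10 * 60 * 1000, 0)
  assert len(hdr) == 14 and not is_unpublished(0)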
Notes
`````
- Should we reduce the 8-byte expiration in leases to a 2-byte offset from the
published timestamp in seconds? Or 4-byte offset in milliseconds?
- If we ever implement revocation, we can do it with an expires field of zero,
or zero leases, or both. No need for a separate revocation key.
Encrypted LS2
-------------
Changes from existing encrypted LeaseSet:
- Encrypt the whole thing for security
- Securely encrypt, not with AES only.
- Encrypt to each recipient
Lookup with:
Standard LS flag (1)
Store with:
Encrypted LS2 type (3)
Typical expiration:
10 minutes, as in a regular LS.
Published by:
Destination
Format
``````
::
Standard LS2 Header:
- Destination (387+ bytes)
- Published timestamp (8 bytes)
- Expires (4 bytes) (offset from published in ms)
- Flags (2 bytes)
Encrypted LS2 Type-Specific Part
- Length of encrypted data (2 bytes)
- Encrypted data
Format TBD and application-specific.
When decrypted, it contains the standard LS2 Type-Specific Part.
Standard LS2 Signature:
- Signature (40+ bytes)
Flags: for future use
The signature is of everything above, i.e. the encrypted data.
Notes
`````
- For multiple clients, encrypted format is probably like GPG/OpenPGP does.
Asymmetrically encrypt a symmetric key for each recipient. Data is decrypted
with that symmetric key. See e.g. [RFC-4880-S5.1]_ IF we can find an
algorithm that's small and fast.
- Can we use a shortened version of our current ElGamal, which is 222 bytes
in and 514 bytes out? That's a little long for each record.
- For a single client, we could just ElG encrypt the whole leaseset, 514 bytes
isn't so bad.
- If we want to specify the encryption format in the clear, we could have an
identifier just before the encrypted data, or in the flags.
- A service using encrypted leasesets would publish the encrypted version to the
floodfills. However, for efficiency, it would send unencrypted leasesets to
clients in the wrapped garlic message, once authenticated (via whitelist, for
example).
- Floodfills may limit the max size to a reasonable value to prevent abuse.
Meta LS2
--------
This is used to replace multihoming. Like any leaseset, this is signed by the
creator. This is an authenticated list of destination hashes.
It contains a number of entries, each pointing to a LS, LS2, or another Meta LS2
to support massive multihoming.
Lookup with:
Standard LS flag (1)
Store with:
Meta LS2 type (4)
Typical expiration:
Hours to days
Published by:
"master" Destination or coordinator
Format
``````
::
Standard LS2 Header:
- Destination (387+ bytes)
- Published timestamp (8 bytes)
- Expires (4 bytes) (offset from published in ms)
- Flags (2 bytes)
Meta LS2 Type-Specific Part
- Number of entries (1 byte)
- Entries. Each entry contains: (39 bytes)
- Hash (32 bytes)
- Flags (2 bytes)
- Expires (4 bytes) (offset from published in ms)
- Cost (priority) (1 byte)
- Number of revocations (1 byte)
- Revocations: Each revocation contains: (32 bytes)
- Hash (32 bytes)
- Properties (2 bytes if empty)
Standard LS2 Signature:
- Signature (40+ bytes)
Flags and properties: for future use
Notes
`````
- A distributed service using this would have one or more "masters" with the
private key of the service destination. They would (out of band) determine the
current list of active destinations and would publish the Meta LS2. For
redundancy, multiple masters could multihome (i.e. concurrently publish) the
Meta LS2.
- A distributed service could start with a single destination or use old-style
multihoming, then transition to a Meta LS2. A standard LS lookup could return
any one of a LS, LS2, or Meta LS2.
- When a service uses a Meta LS2, it has no tunnels (leases).
Service Record
--------------
This is an individual record saying that a destination is participating in a
service. It is sent from the participant to the floodfill. It is not ever sent
individually by a floodfill, but only as a part of a Service List. The Service
Record is also used to revoke participation in a service, by setting the
expiration to zero.
This is not a LS2 but it uses the standard LS2 header and signature format.
Lookup with:
n/a, see Service List
Store with:
Service Record type (5)
Typical expiration:
Hours
Published by:
Destination
Format
``````
::
Standard LS2 Header:
- Destination (387+ bytes)
- Published timestamp (8 bytes)
- Expires (4 bytes) (offset from published in ms, all zeros for revocation)
- Flags (2 bytes)
Service Record Type-Specific Part
- Port (2 bytes) (0 if unspecified)
- Hash of service name (32 bytes)
Standard LS2 Signature:
- Signature (40+ bytes)
Flags: for future use
Notes
`````
- If expires is all zeros, the floodfill should revoke the record and no longer
include it in the service list.
- Storage: The floodfill may strictly throttle storage of these records and
limit the number of records stored per hash and their expiration. A whitelist
of hashes may also be used.
Service List
------------
This is nothing like a LS2 and uses a different format.
The service list is created and signed by the floodfill. It is unauthenticated
in that anybody can join a service by publishing a Service Record to a
floodfill.
A Service List contains Short Service Records, not full Service Records. These
contain signatures but only hashes, not full destinations, so they cannot be
verified without the full destination.
Lookup with:
Service List lookup type (2)
Store with:
Service List type (6)
Typical expiration:
Hours, not specified in the list itself, up to local policy
Published by:
Nobody, never sent to floodfill, never flooded.
Format
``````
::
- Hash of the service name (implicit, in the Database Store message)
- Hash of the Creator (floodfill) (32 bytes)
- Timestamp (8 bytes)
- Number of Short Service Records (1 byte)
- List of Short Service Records:
Each Short Service Record contains (90+ bytes)
- Dest hash (32 bytes)
- Published timestamp (8 bytes)
- Expires (4 bytes) (offset from published in ms)
- Flags (2 bytes)
- Port (2 bytes)
- Sig length (2 bytes)
- Signature of dest (40+ bytes)
- Number of Revocation Records (1 byte)
- List of Revocation Records:
Each Revocation Record contains (86+ bytes)
- Dest hash (32 bytes)
- Published timestamp (8 bytes)
- Flags (2 bytes)
- Port (2 bytes)
- Sig length (2 bytes)
- Signature of dest (40+ bytes)
- Signature of floodfill (40+ bytes)
To verify signature of the Service List:
- prepend the hash of the service name
- remove the hash of the creator
- Check signature of the modified contents
To verify signature of each Short Service Record:
- Fetch destination
- Check signature of (published timestamp + expires + flags + port + Hash of
service name)
To verify signature of each Revocation Record:
- Fetch destination
- Check signature of (published timestamp + 4 zero bytes + flags + port + Hash
of service name)
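A non-normative sketch of assembling the signed data for these checks (Python;
big-endian field encoding is an assumption, while the field order and sizes
follow the formats above)::

import struct

def short_record_signed_data(published_ms, expires_offset_ms, flags, port,
                             service_name_hash):
    # Bytes covered by a Short Service Record signature:
    # published (8) + expires (4) + flags (2) + port (2) + service name hash (32)
    return (struct.pack('>Q', published_ms)
            + struct.pack('>I', expires_offset_ms)
            + struct.pack('>HH', flags, port)
            + service_name_hash)

def revocation_signed_data(published_ms, flags, port, service_name_hash):
    # Same layout, with the 4-byte expires field all zeros.
    return short_record_signed_data(published_ms, 0, flags, port,
                                    service_name_hash)

def service_list_signed_data(service_name_hash, list_bytes_without_sig):
    # Per the list-verification steps: drop the leading 32-byte creator hash
    # and prepend the (implicit) service-name hash instead.
    return service_name_hash + list_bytes_without_sig[32:]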
Notes
`````
- We use signature length instead of sigtype so we can support unknown signature
types.
- There is no expiration of a service list; recipients may make their own
decision based on policy or on the expiration of the individual records.
- Service Lists are not flooded, only individual Service Records are. Each
floodfill creates, signs, and caches a Service List. The floodfill uses its
own policy for cache time and the maximum number of service and revocation
records.
References
==========
.. [RFC-4880-S5.1]
https://tools.ietf.org/html/rfc4880#section-5.1

View File

@@ -0,0 +1,42 @@
====================================
OBEP Delivery to One-of-Many Tunnels
====================================
.. meta::
:author: zzz
:created: 2016-03-10
:thread: http://zzz.i2p/topics/2099
:lastupdated: 2016-03-10
:status: Draft
.. contents::
Introduction
============
To reduce connection congestion, give the OBEP a list of id/hash pairs (i.e.
leases) to deliver the message to rather than just one. The OBEP would select
one of those to deliver to, preferring, if available, one that it is already
connected to or already knows about.
The originator (OBGW) would include some (or all?) of the target leases in the
delivery instructions instead of picking just one.
This would make the OBEP-IBGW path faster and more reliable, and reduce overall
network connections.
Proposal
========
We have one unused delivery type (0x03) and two remaining bits (0 and 1) in the
flags. Because we've previously discussed multicast at the OBEP (deliver to all
specified leases), we could plan for that feature as well at the same time.
So the specification proposal is::
Flag byte:
Delivery type 0x03: count byte and multiple id/hash pairs follow
Bit 0: 0 to deliver to one of the tunnels; 1 to deliver to all of the tunnels
Count byte: number of id/hash pairs to follow (2-255)
That many id/hash pairs (36 bytes each)
rest of delivery instructions unchanged
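A non-normative sketch of the OBEP selection for the "one of" case (Python;
the preference order is taken from the introduction, everything else is an
illustrative assumption)::

import random

def select_lease(leases, connected_hashes):
    # 'leases' is the list of (tunnel_id, gateway_hash) pairs from the
    # delivery instructions; 'connected_hashes' are gateways the OBEP is
    # already connected to (or already knows about). Prefer one of those,
    # otherwise fall back to a random entry.
    preferred = [lease for lease in leases if lease[1] in connected_hashes]
    return random.choice(preferred) if preferred else random.choice(leases)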

View File

@@ -1,9 +1,12 @@
import codecs
from docutils import io
from docutils.core import (
Publisher,
publish_doctree,
publish_from_doctree,
publish_parts,
)
from docutils.readers.doctree import Reader
from flask import (
abort,
g,
@@ -17,7 +20,7 @@ from flask import (
)
import os.path
from i2p2www import SPEC_DIR
from i2p2www import PROPOSAL_DIR, SPEC_DIR
from i2p2www import helpers
@@ -26,7 +29,6 @@ SPEC_METATAGS = {
'category': '',
'lastupdated': None,
}
SPEC_LIST_METATAGS = [
]
SPEC_CATEGORY_SORT = {
@@ -36,12 +38,42 @@ SPEC_CATEGORY_SORT = {
'': 999,
}
PROPOSAL_METATAGS = {
'author': u'I2P devs',
'created': None,
'lastupdated': None,
'status': u'Draft',
'supercededby': None,
'supercedes': None,
'thread': None,
}
PROPOSAL_LIST_METATAGS = [
'supercedes',
]
PROPOSAL_STATUS_SORT = {
'Draft': 1,
'Rejected': 100,
'': 999,
}
def spec_index():
specs = []
for f in os.listdir(SPEC_DIR):
METATAG_LABELS = {
'accuratefor': u'Accurate for',
'author': u'Author',
'category': u'Category',
'created': u'Created',
'lastupdated': u'Last updated',
'status': u'Status',
'supercededby': u'Superceded by',
'supercedes': u'Supercedes',
'thread': u'Thread',
}
def get_rsts(directory, meta_parser):
rsts = []
for f in os.listdir(directory):
if f.endswith('.rst'):
path = safe_join(SPEC_DIR, f)
path = safe_join(directory, f)
# read file header
header = ''
with codecs.open(path, encoding='utf-8') as fd:
@@ -49,22 +81,32 @@ def spec_index():
header += line
if not line.strip():
break
parts = publish_parts(source=header, source_path=SPEC_DIR, writer_name="html")
meta = get_metadata_from_meta(parts['meta'])
parts = publish_parts(source=header, source_path=directory, writer_name="html")
meta = meta_parser(parts['meta'])
spec = {
rst = {
'name': f[:-4],
'title': parts['title'],
}
spec.update(meta)
specs.append(spec)
rst.update(meta)
rsts.append(rst)
return rsts
def spec_index():
specs = get_rsts(SPEC_DIR, spec_meta)
specs.sort(key=lambda s: (SPEC_CATEGORY_SORT[s['category']], s['title']))
return render_template('spec/index.html', specs=specs)
def spec_show(name, txt=False):
def proposal_index():
proposals = get_rsts(PROPOSAL_DIR, proposal_meta)
for i in range(0, len(proposals)):
proposals[i]['num'] = int(proposals[i]['name'][:3])
proposals.sort(key=lambda s: (PROPOSAL_STATUS_SORT[s['status']], s['num']))
return render_template('spec/proposal-index.html', proposals=proposals)
def render_rst(directory, name, meta_parser, template):
# check if that file actually exists
path = safe_join(SPEC_DIR, name + '.rst')
path = safe_join(directory, name + '.rst')
if not os.path.exists(path):
abort(404)
@@ -72,7 +114,7 @@ def spec_show(name, txt=False):
with codecs.open(path, encoding='utf-8') as fd:
content = fd.read()
if txt:
if not template:
# Strip out RST
content = content.replace('.. meta::\n', '')
content = content.replace('.. contents::\n\n', '')
@@ -82,16 +124,15 @@ def spec_show(name, txt=False):
content = content.replace(']_', '] ')
# Change highlight formatter
content = content.replace('{% highlight', "{% highlight formatter='textspec'")
# Other string changes
content = content.replace(' :accuratefor', '- Accurate for')
content = content.replace(' :category', '- Category')
content = content.replace(' :lastupdated', '- Last updated')
# Metatags
for (metatag, label) in METATAG_LABELS.items():
content = content.replace(' :%s' % metatag, label)
# render the post with Jinja2 to handle URLs etc.
rendered_content = render_template_string(content)
rendered_content = rendered_content.replace('</pre></div>', ' </pre></div>')
if txt:
if not template:
# Send response
r = make_response(rendered_content)
r.mimetype = 'text/plain'
@@ -102,19 +143,40 @@ def spec_show(name, txt=False):
bullet_list = doctree[1][1]
doctree.clear()
doctree.append(bullet_list)
toc = publish_from_doctree(doctree, writer_name='html')
reader = Reader(parser_name='null')
pub = Publisher(reader, None, None,
source=io.DocTreeInput(doctree),
destination_class=io.StringOutput)
pub.set_writer('html')
pub.publish()
toc = pub.writer.parts['fragment']
# Remove the ToC from the main document
rendered_content = rendered_content.replace('.. contents::\n', '')
# publish the spec with docutils
parts = publish_parts(source=rendered_content, source_path=SPEC_DIR, writer_name="html")
meta = get_metadata_from_meta(parts['meta'])
parts = publish_parts(source=rendered_content, source_path=directory, writer_name="html")
meta = meta_parser(parts['meta'])
return render_template('spec/show.html', title=parts['title'], toc=toc, body=parts['fragment'], name=name, meta=meta)
if (directory == PROPOSAL_DIR):
meta['num'] = int(name[:3])
return render_template(template, title=parts['title'], toc=toc, body=parts['fragment'], name=name, meta=meta)
def spec_show(name):
return render_rst(SPEC_DIR, name, spec_meta, 'spec/show.html')
def spec_show_txt(name):
return spec_show(name, True)
return render_rst(SPEC_DIR, name, spec_meta, None)
def get_metadata_from_meta(meta):
def proposal_show(name):
return render_rst(PROPOSAL_DIR, name, proposal_meta, 'spec/proposal-show.html')
def proposal_show_txt(name):
return render_rst(PROPOSAL_DIR, name, proposal_meta, None)
def spec_meta(meta):
return helpers.get_metadata_from_meta(meta, SPEC_METATAGS, SPEC_LIST_METATAGS)
def proposal_meta(meta):
return helpers.get_metadata_from_meta(meta, PROPOSAL_METATAGS, PROPOSAL_LIST_METATAGS)
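# Not part of the commit: a quick sketch of the NNN-name.rst convention that
# proposal_index() and get_proposal_url() rely on. The file name below is
# hypothetical.
name = '123-example-proposal'        # i.e. 123-example-proposal.rst
num = int(name[:3])                  # -> 123, used to sort the proposal index
assert name.startswith('123')        # how get_proposal_url('123') finds it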

View File

@@ -2,7 +2,17 @@ import ctags
from flask import g, request, safe_join, url_for
import os.path
from i2p2www import CANONICAL_DOMAIN, CURRENT_I2P_VERSION, RTL_LANGS, SUPPORTED_LANGS, SUPPORTED_LANG_NAMES, SPEC_DIR, STATIC_DIR, app
from i2p2www import (
CANONICAL_DOMAIN,
CURRENT_I2P_VERSION,
PROPOSAL_DIR,
RTL_LANGS,
SUPPORTED_LANGS,
SUPPORTED_LANG_NAMES,
SPEC_DIR,
STATIC_DIR,
app,
)
INPROXY = '.xyz' # http://zzz.i2p/topics/1771-i2p-xyz-inproxy
@@ -46,6 +56,20 @@ def utility_processor():
url = url[:url.index('?')]
return url
def get_proposal_url(identifier):
name = None
for f in os.listdir(PROPOSAL_DIR):
if f.startswith(identifier):
name = f[:-4]
break
if not name:
return ''
url = url_for('proposal_show', name=name, _external=True)
# Remove ?lang=xx
if '?' in url:
url = url[:url.index('?')]
return url
def get_ctags_url(value):
filename, kind = _lookup_ctag(value)
# Handle message types
@@ -149,6 +173,7 @@ def utility_processor():
logo_url=get_logo_for_theme,
site_url=get_site_url,
spec_url=get_spec_url,
proposal_url=get_proposal_url,
ctags_url=get_ctags_url,
get_url=get_url_with_lang,
is_rtl=is_rtl_lang,

View File

@@ -47,6 +47,9 @@ url('/<lang:lang>/<path:page>', 'views.site_show')
url('/spec', 'spec.views.spec_index')
url('/spec/<string:name>', 'spec.views.spec_show')
url('/spec/<string:name>.txt', 'spec.views.spec_show_txt')
url('/spec/proposals', 'spec.views.proposal_index')
url('/spec/proposals/<string:name>', 'spec.views.proposal_show')
url('/spec/proposals/<string:name>.txt', 'spec.views.proposal_show_txt')
url('/<lang:lang>/papers/', 'anonbib.views.papers_list')
url('/<lang:lang>/papers/bibtex', 'anonbib.views.papers_bibtex')