[tbb-commits] [tor-browser-build/master] Bug 34043: Update snowflake to persist sessions across proxies
gk at torproject.org
Wed May 13 12:42:39 UTC 2020
commit 8547db1c629adf1bbdabcb44e2245944920472e6
Author: David Fifield <david at bamsoftware.com>
Date: Mon Feb 10 16:06:08 2020 -0700
Bug 34043: Update snowflake to persist sessions across proxies
This brings in changes related to
#33745 Merge a turbotunnel branch
#33897 Remove buffering from WebRTCPeer
#34042 Reduce DataChannelTimeout
The new dependencies kcp-go and smux form the inner session layer.
A patch to kcp-go removes dependencies that are needed only for features
we don't use.
---
projects/goxnet/config | 2 +
projects/kcp-go/config | 24 +
projects/kcp-go/remove-crypt-fec.patch | 1019 ++++++++++++++++++++++++++++++++
projects/smux/config | 16 +
projects/snowflake/build | 3 +
projects/snowflake/config | 6 +-
6 files changed, 1069 insertions(+), 1 deletion(-)
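
For orientation, here is a minimal sketch of how the inner session layer
fits together on the client side. It assumes a hypothetical pconn
net.PacketConn backed by the Snowflake WebRTC transport, and it only
illustrates the kcp-go and smux APIs the new dependencies provide; it is
not the snowflake code itself.

// Sketch only: a reliable, multiplexed session over an unreliable packet
// transport, in the style of the turbotunnel design referenced above.
package sketch

import (
	"io"
	"net"

	kcp "github.com/xtaci/kcp-go/v5"
	"github.com/xtaci/smux"
)

// dialInnerSession is hypothetical; pconn stands in for a PacketConn
// backed by WebRTC data channels that may be swapped between proxies.
func dialInnerSession(pconn net.PacketConn, raddr net.Addr) (io.ReadWriteCloser, error) {
	// KCP turns lossy, unordered packets into a reliable byte stream.
	// BlockCrypt is nil and data/parity shards are 0: the crypt and FEC
	// features are the ones stripped out by remove-crypt-fec.patch below.
	conn, err := kcp.NewConn2(raddr, nil, 0, 0, pconn)
	if err != nil {
		return nil, err
	}
	// smux multiplexes logical streams over the single KCP connection,
	// so the session can outlive any one underlying proxy.
	sess, err := smux.Client(conn, smux.DefaultConfig())
	if err != nil {
		conn.Close()
		return nil, err
	}
	return sess.OpenStream()
}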
diff --git a/projects/goxnet/config b/projects/goxnet/config
index 5394875..7656795 100644
--- a/projects/goxnet/config
+++ b/projects/goxnet/config
@@ -11,6 +11,8 @@ var:
use_container: 1
go_lib: golang.org/x/net
go_lib_install:
+ - golang.org/x/net/ipv4
+ - golang.org/x/net/ipv6
- golang.org/x/net/proxy
- golang.org/x/net/dns/dnsmessage
go_lib_deps:
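
The two packages added here are presumably pulled in for kcp-go's batched
packet I/O, which uses the golang.org/x/net/ipv4 and ipv6 batch read/write
APIs where available. A hedged illustration of that API follows (not
project code; conn is a hypothetical *net.UDPConn):

// Illustration of the golang.org/x/net/ipv4 batch API.
package sketch

import (
	"net"

	"golang.org/x/net/ipv4"
)

func readBatch(conn *net.UDPConn) ([][]byte, error) {
	pc := ipv4.NewPacketConn(conn)
	// One receive buffer per message in the batch.
	msgs := make([]ipv4.Message, 16)
	for i := range msgs {
		msgs[i].Buffers = [][]byte{make([]byte, 1500)}
	}
	// ReadBatch can return several datagrams per syscall (recvmmsg)
	// on platforms that support it.
	n, err := pc.ReadBatch(msgs, 0)
	if err != nil {
		return nil, err
	}
	var payloads [][]byte
	for _, m := range msgs[:n] {
		payloads = append(payloads, m.Buffers[0][:m.N])
	}
	return payloads, nil
}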
diff --git a/projects/kcp-go/config b/projects/kcp-go/config
new file mode 100644
index 0000000..7551a5e
--- /dev/null
+++ b/projects/kcp-go/config
@@ -0,0 +1,24 @@
+version: '[% c("abbrev") %]'
+git_url: https://github.com/xtaci/kcp-go
+git_hash: 831b5b267373f6fbd3548849a3925c4e70806de2 # v5.5.11
+filename: '[% project %]-[% c("version") %]-[% c("var/osname") %]-[% c("var/build_id") %].tar.gz'
+
+build: '[% c("projects/go/var/build_go_lib") %]'
+
+var:
+ container:
+ use_container: 1
+ go_lib: github.com/xtaci/kcp-go
+ go_lib_deps:
+ - goerrors
+ - goxnet
+
+input_files:
+ - project: container-image
+ - name: go
+ project: go
+ - name: goerrors
+ project: goerrors
+ - name: goxnet
+ project: goxnet
+ - filename: remove-crypt-fec.patch
diff --git a/projects/kcp-go/remove-crypt-fec.patch b/projects/kcp-go/remove-crypt-fec.patch
new file mode 100644
index 0000000..5693ac5
--- /dev/null
+++ b/projects/kcp-go/remove-crypt-fec.patch
@@ -0,0 +1,1019 @@
+From 0b9d0759f979a5d828b747ea51771f307c53d221 Mon Sep 17 00:00:00 2001
+From: David Fifield <david at bamsoftware.com>
+Date: Thu, 9 Apr 2020 11:27:44 -0600
+Subject: [PATCH] Remove crypt and FEC dependencies.
+
+---
+ crypt.go | 618 -----------------------------------------------------
+ fec.go | 337 -----------------------------
+ removed.go | 29 +++
+ 3 files changed, 29 insertions(+), 955 deletions(-)
+ delete mode 100644 crypt.go
+ delete mode 100644 fec.go
+ create mode 100644 removed.go
+
+diff --git a/crypt.go b/crypt.go
+deleted file mode 100644
+index d882852..0000000
+--- a/crypt.go
++++ /dev/null
+@@ -1,618 +0,0 @@
+-package kcp
+-
+-import (
+- "crypto/aes"
+- "crypto/cipher"
+- "crypto/des"
+- "crypto/sha1"
+- "unsafe"
+-
+- xor "github.com/templexxx/xorsimd"
+- "github.com/tjfoc/gmsm/sm4"
+-
+- "golang.org/x/crypto/blowfish"
+- "golang.org/x/crypto/cast5"
+- "golang.org/x/crypto/pbkdf2"
+- "golang.org/x/crypto/salsa20"
+- "golang.org/x/crypto/tea"
+- "golang.org/x/crypto/twofish"
+- "golang.org/x/crypto/xtea"
+-)
+-
+-var (
+- initialVector = []byte{167, 115, 79, 156, 18, 172, 27, 1, 164, 21, 242, 193, 252, 120, 230, 107}
+- saltxor = `sH3CIVoF#rWLtJo6`
+-)
+-
+-// BlockCrypt defines encryption/decryption methods for a given byte slice.
+-// Notes on implementing: the data to be encrypted contains a builtin
+-// nonce at the first 16 bytes
+-type BlockCrypt interface {
+- // Encrypt encrypts the whole block in src into dst.
+- // Dst and src may point at the same memory.
+- Encrypt(dst, src []byte)
+-
+- // Decrypt decrypts the whole block in src into dst.
+- // Dst and src may point at the same memory.
+- Decrypt(dst, src []byte)
+-}
+-
+-type salsa20BlockCrypt struct {
+- key [32]byte
+-}
+-
+-// NewSalsa20BlockCrypt https://en.wikipedia.org/wiki/Salsa20
+-func NewSalsa20BlockCrypt(key []byte) (BlockCrypt, error) {
+- c := new(salsa20BlockCrypt)
+- copy(c.key[:], key)
+- return c, nil
+-}
+-
+-func (c *salsa20BlockCrypt) Encrypt(dst, src []byte) {
+- salsa20.XORKeyStream(dst[8:], src[8:], src[:8], &c.key)
+- copy(dst[:8], src[:8])
+-}
+-func (c *salsa20BlockCrypt) Decrypt(dst, src []byte) {
+- salsa20.XORKeyStream(dst[8:], src[8:], src[:8], &c.key)
+- copy(dst[:8], src[:8])
+-}
+-
+-type sm4BlockCrypt struct {
+- encbuf [sm4.BlockSize]byte // 64bit alignment enc/dec buffer
+- decbuf [2 * sm4.BlockSize]byte
+- block cipher.Block
+-}
+-
+-// NewSM4BlockCrypt https://github.com/tjfoc/gmsm/tree/master/sm4
+-func NewSM4BlockCrypt(key []byte) (BlockCrypt, error) {
+- c := new(sm4BlockCrypt)
+- block, err := sm4.NewCipher(key)
+- if err != nil {
+- return nil, err
+- }
+- c.block = block
+- return c, nil
+-}
+-
+-func (c *sm4BlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf[:]) }
+-func (c *sm4BlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf[:]) }
+-
+-type twofishBlockCrypt struct {
+- encbuf [twofish.BlockSize]byte
+- decbuf [2 * twofish.BlockSize]byte
+- block cipher.Block
+-}
+-
+-// NewTwofishBlockCrypt https://en.wikipedia.org/wiki/Twofish
+-func NewTwofishBlockCrypt(key []byte) (BlockCrypt, error) {
+- c := new(twofishBlockCrypt)
+- block, err := twofish.NewCipher(key)
+- if err != nil {
+- return nil, err
+- }
+- c.block = block
+- return c, nil
+-}
+-
+-func (c *twofishBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf[:]) }
+-func (c *twofishBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf[:]) }
+-
+-type tripleDESBlockCrypt struct {
+- encbuf [des.BlockSize]byte
+- decbuf [2 * des.BlockSize]byte
+- block cipher.Block
+-}
+-
+-// NewTripleDESBlockCrypt https://en.wikipedia.org/wiki/Triple_DES
+-func NewTripleDESBlockCrypt(key []byte) (BlockCrypt, error) {
+- c := new(tripleDESBlockCrypt)
+- block, err := des.NewTripleDESCipher(key)
+- if err != nil {
+- return nil, err
+- }
+- c.block = block
+- return c, nil
+-}
+-
+-func (c *tripleDESBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf[:]) }
+-func (c *tripleDESBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf[:]) }
+-
+-type cast5BlockCrypt struct {
+- encbuf [cast5.BlockSize]byte
+- decbuf [2 * cast5.BlockSize]byte
+- block cipher.Block
+-}
+-
+-// NewCast5BlockCrypt https://en.wikipedia.org/wiki/CAST-128
+-func NewCast5BlockCrypt(key []byte) (BlockCrypt, error) {
+- c := new(cast5BlockCrypt)
+- block, err := cast5.NewCipher(key)
+- if err != nil {
+- return nil, err
+- }
+- c.block = block
+- return c, nil
+-}
+-
+-func (c *cast5BlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf[:]) }
+-func (c *cast5BlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf[:]) }
+-
+-type blowfishBlockCrypt struct {
+- encbuf [blowfish.BlockSize]byte
+- decbuf [2 * blowfish.BlockSize]byte
+- block cipher.Block
+-}
+-
+-// NewBlowfishBlockCrypt https://en.wikipedia.org/wiki/Blowfish_(cipher)
+-func NewBlowfishBlockCrypt(key []byte) (BlockCrypt, error) {
+- c := new(blowfishBlockCrypt)
+- block, err := blowfish.NewCipher(key)
+- if err != nil {
+- return nil, err
+- }
+- c.block = block
+- return c, nil
+-}
+-
+-func (c *blowfishBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf[:]) }
+-func (c *blowfishBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf[:]) }
+-
+-type aesBlockCrypt struct {
+- encbuf [aes.BlockSize]byte
+- decbuf [2 * aes.BlockSize]byte
+- block cipher.Block
+-}
+-
+-// NewAESBlockCrypt https://en.wikipedia.org/wiki/Advanced_Encryption_Standard
+-func NewAESBlockCrypt(key []byte) (BlockCrypt, error) {
+- c := new(aesBlockCrypt)
+- block, err := aes.NewCipher(key)
+- if err != nil {
+- return nil, err
+- }
+- c.block = block
+- return c, nil
+-}
+-
+-func (c *aesBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf[:]) }
+-func (c *aesBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf[:]) }
+-
+-type teaBlockCrypt struct {
+- encbuf [tea.BlockSize]byte
+- decbuf [2 * tea.BlockSize]byte
+- block cipher.Block
+-}
+-
+-// NewTEABlockCrypt https://en.wikipedia.org/wiki/Tiny_Encryption_Algorithm
+-func NewTEABlockCrypt(key []byte) (BlockCrypt, error) {
+- c := new(teaBlockCrypt)
+- block, err := tea.NewCipherWithRounds(key, 16)
+- if err != nil {
+- return nil, err
+- }
+- c.block = block
+- return c, nil
+-}
+-
+-func (c *teaBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf[:]) }
+-func (c *teaBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf[:]) }
+-
+-type xteaBlockCrypt struct {
+- encbuf [xtea.BlockSize]byte
+- decbuf [2 * xtea.BlockSize]byte
+- block cipher.Block
+-}
+-
+-// NewXTEABlockCrypt https://en.wikipedia.org/wiki/XTEA
+-func NewXTEABlockCrypt(key []byte) (BlockCrypt, error) {
+- c := new(xteaBlockCrypt)
+- block, err := xtea.NewCipher(key)
+- if err != nil {
+- return nil, err
+- }
+- c.block = block
+- return c, nil
+-}
+-
+-func (c *xteaBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf[:]) }
+-func (c *xteaBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf[:]) }
+-
+-type simpleXORBlockCrypt struct {
+- xortbl []byte
+-}
+-
+-// NewSimpleXORBlockCrypt simple xor with key expanding
+-func NewSimpleXORBlockCrypt(key []byte) (BlockCrypt, error) {
+- c := new(simpleXORBlockCrypt)
+- c.xortbl = pbkdf2.Key(key, []byte(saltxor), 32, mtuLimit, sha1.New)
+- return c, nil
+-}
+-
+-func (c *simpleXORBlockCrypt) Encrypt(dst, src []byte) { xor.Bytes(dst, src, c.xortbl) }
+-func (c *simpleXORBlockCrypt) Decrypt(dst, src []byte) { xor.Bytes(dst, src, c.xortbl) }
+-
+-type noneBlockCrypt struct{}
+-
+-// NewNoneBlockCrypt does nothing but copying
+-func NewNoneBlockCrypt(key []byte) (BlockCrypt, error) {
+- return new(noneBlockCrypt), nil
+-}
+-
+-func (c *noneBlockCrypt) Encrypt(dst, src []byte) { copy(dst, src) }
+-func (c *noneBlockCrypt) Decrypt(dst, src []byte) { copy(dst, src) }
+-
+-// packet encryption with local CFB mode
+-func encrypt(block cipher.Block, dst, src, buf []byte) {
+- switch block.BlockSize() {
+- case 8:
+- encrypt8(block, dst, src, buf)
+- case 16:
+- encrypt16(block, dst, src, buf)
+- default:
+- panic("unsupported cipher block size")
+- }
+-}
+-
+-// optimized encryption for the ciphers which works in 8-bytes
+-func encrypt8(block cipher.Block, dst, src, buf []byte) {
+- tbl := buf[:8]
+- block.Encrypt(tbl, initialVector)
+- n := len(src) / 8
+- base := 0
+- repeat := n / 8
+- left := n % 8
+- ptr_tbl := (*uint64)(unsafe.Pointer(&tbl[0]))
+-
+- for i := 0; i < repeat; i++ {
+- s := src[base:][0:64]
+- d := dst[base:][0:64]
+- // 1
+- *(*uint64)(unsafe.Pointer(&d[0])) = *(*uint64)(unsafe.Pointer(&s[0])) ^ *ptr_tbl
+- block.Encrypt(tbl, d[0:8])
+- // 2
+- *(*uint64)(unsafe.Pointer(&d[8])) = *(*uint64)(unsafe.Pointer(&s[8])) ^ *ptr_tbl
+- block.Encrypt(tbl, d[8:16])
+- // 3
+- *(*uint64)(unsafe.Pointer(&d[16])) = *(*uint64)(unsafe.Pointer(&s[16])) ^ *ptr_tbl
+- block.Encrypt(tbl, d[16:24])
+- // 4
+- *(*uint64)(unsafe.Pointer(&d[24])) = *(*uint64)(unsafe.Pointer(&s[24])) ^ *ptr_tbl
+- block.Encrypt(tbl, d[24:32])
+- // 5
+- *(*uint64)(unsafe.Pointer(&d[32])) = *(*uint64)(unsafe.Pointer(&s[32])) ^ *ptr_tbl
+- block.Encrypt(tbl, d[32:40])
+- // 6
+- *(*uint64)(unsafe.Pointer(&d[40])) = *(*uint64)(unsafe.Pointer(&s[40])) ^ *ptr_tbl
+- block.Encrypt(tbl, d[40:48])
+- // 7
+- *(*uint64)(unsafe.Pointer(&d[48])) = *(*uint64)(unsafe.Pointer(&s[48])) ^ *ptr_tbl
+- block.Encrypt(tbl, d[48:56])
+- // 8
+- *(*uint64)(unsafe.Pointer(&d[56])) = *(*uint64)(unsafe.Pointer(&s[56])) ^ *ptr_tbl
+- block.Encrypt(tbl, d[56:64])
+- base += 64
+- }
+-
+- switch left {
+- case 7:
+- *(*uint64)(unsafe.Pointer(&dst[base])) = *(*uint64)(unsafe.Pointer(&src[base])) ^ *ptr_tbl
+- block.Encrypt(tbl, dst[base:])
+- base += 8
+- fallthrough
+- case 6:
+- *(*uint64)(unsafe.Pointer(&dst[base])) = *(*uint64)(unsafe.Pointer(&src[base])) ^ *ptr_tbl
+- block.Encrypt(tbl, dst[base:])
+- base += 8
+- fallthrough
+- case 5:
+- *(*uint64)(unsafe.Pointer(&dst[base])) = *(*uint64)(unsafe.Pointer(&src[base])) ^ *ptr_tbl
+- block.Encrypt(tbl, dst[base:])
+- base += 8
+- fallthrough
+- case 4:
+- *(*uint64)(unsafe.Pointer(&dst[base])) = *(*uint64)(unsafe.Pointer(&src[base])) ^ *ptr_tbl
+- block.Encrypt(tbl, dst[base:])
+- base += 8
+- fallthrough
+- case 3:
+- *(*uint64)(unsafe.Pointer(&dst[base])) = *(*uint64)(unsafe.Pointer(&src[base])) ^ *ptr_tbl
+- block.Encrypt(tbl, dst[base:])
+- base += 8
+- fallthrough
+- case 2:
+- *(*uint64)(unsafe.Pointer(&dst[base])) = *(*uint64)(unsafe.Pointer(&src[base])) ^ *ptr_tbl
+- block.Encrypt(tbl, dst[base:])
+- base += 8
+- fallthrough
+- case 1:
+- *(*uint64)(unsafe.Pointer(&dst[base])) = *(*uint64)(unsafe.Pointer(&src[base])) ^ *ptr_tbl
+- block.Encrypt(tbl, dst[base:])
+- base += 8
+- fallthrough
+- case 0:
+- xorBytes(dst[base:], src[base:], tbl)
+- }
+-}
+-
+-// optimized encryption for the ciphers which works in 16-bytes
+-func encrypt16(block cipher.Block, dst, src, buf []byte) {
+- tbl := buf[:16]
+- block.Encrypt(tbl, initialVector)
+- n := len(src) / 16
+- base := 0
+- repeat := n / 8
+- left := n % 8
+- for i := 0; i < repeat; i++ {
+- s := src[base:][0:128]
+- d := dst[base:][0:128]
+- // 1
+- xor.Bytes16Align(d[0:16], s[0:16], tbl)
+- block.Encrypt(tbl, d[0:16])
+- // 2
+- xor.Bytes16Align(d[16:32], s[16:32], tbl)
+- block.Encrypt(tbl, d[16:32])
+- // 3
+- xor.Bytes16Align(d[32:48], s[32:48], tbl)
+- block.Encrypt(tbl, d[32:48])
+- // 4
+- xor.Bytes16Align(d[48:64], s[48:64], tbl)
+- block.Encrypt(tbl, d[48:64])
+- // 5
+- xor.Bytes16Align(d[64:80], s[64:80], tbl)
+- block.Encrypt(tbl, d[64:80])
+- // 6
+- xor.Bytes16Align(d[80:96], s[80:96], tbl)
+- block.Encrypt(tbl, d[80:96])
+- // 7
+- xor.Bytes16Align(d[96:112], s[96:112], tbl)
+- block.Encrypt(tbl, d[96:112])
+- // 8
+- xor.Bytes16Align(d[112:128], s[112:128], tbl)
+- block.Encrypt(tbl, d[112:128])
+- base += 128
+- }
+-
+- switch left {
+- case 7:
+- xor.Bytes16Align(dst[base:], src[base:], tbl)
+- block.Encrypt(tbl, dst[base:])
+- base += 16
+- fallthrough
+- case 6:
+- xor.Bytes16Align(dst[base:], src[base:], tbl)
+- block.Encrypt(tbl, dst[base:])
+- base += 16
+- fallthrough
+- case 5:
+- xor.Bytes16Align(dst[base:], src[base:], tbl)
+- block.Encrypt(tbl, dst[base:])
+- base += 16
+- fallthrough
+- case 4:
+- xor.Bytes16Align(dst[base:], src[base:], tbl)
+- block.Encrypt(tbl, dst[base:])
+- base += 16
+- fallthrough
+- case 3:
+- xor.Bytes16Align(dst[base:], src[base:], tbl)
+- block.Encrypt(tbl, dst[base:])
+- base += 16
+- fallthrough
+- case 2:
+- xor.Bytes16Align(dst[base:], src[base:], tbl)
+- block.Encrypt(tbl, dst[base:])
+- base += 16
+- fallthrough
+- case 1:
+- xor.Bytes16Align(dst[base:], src[base:], tbl)
+- block.Encrypt(tbl, dst[base:])
+- base += 16
+- fallthrough
+- case 0:
+- xorBytes(dst[base:], src[base:], tbl)
+- }
+-}
+-
+-// decryption
+-func decrypt(block cipher.Block, dst, src, buf []byte) {
+- switch block.BlockSize() {
+- case 8:
+- decrypt8(block, dst, src, buf)
+- case 16:
+- decrypt16(block, dst, src, buf)
+- default:
+- panic("unsupported cipher block size")
+- }
+-}
+-
+-// decrypt 8 bytes block, all byte slices are supposed to be 64bit aligned
+-func decrypt8(block cipher.Block, dst, src, buf []byte) {
+- tbl := buf[0:8]
+- next := buf[8:16]
+- block.Encrypt(tbl, initialVector)
+- n := len(src) / 8
+- base := 0
+- repeat := n / 8
+- left := n % 8
+- ptr_tbl := (*uint64)(unsafe.Pointer(&tbl[0]))
+- ptr_next := (*uint64)(unsafe.Pointer(&next[0]))
+-
+- for i := 0; i < repeat; i++ {
+- s := src[base:][0:64]
+- d := dst[base:][0:64]
+- // 1
+- block.Encrypt(next, s[0:8])
+- *(*uint64)(unsafe.Pointer(&d[0])) = *(*uint64)(unsafe.Pointer(&s[0])) ^ *ptr_tbl
+- // 2
+- block.Encrypt(tbl, s[8:16])
+- *(*uint64)(unsafe.Pointer(&d[8])) = *(*uint64)(unsafe.Pointer(&s[8])) ^ *ptr_next
+- // 3
+- block.Encrypt(next, s[16:24])
+- *(*uint64)(unsafe.Pointer(&d[16])) = *(*uint64)(unsafe.Pointer(&s[16])) ^ *ptr_tbl
+- // 4
+- block.Encrypt(tbl, s[24:32])
+- *(*uint64)(unsafe.Pointer(&d[24])) = *(*uint64)(unsafe.Pointer(&s[24])) ^ *ptr_next
+- // 5
+- block.Encrypt(next, s[32:40])
+- *(*uint64)(unsafe.Pointer(&d[32])) = *(*uint64)(unsafe.Pointer(&s[32])) ^ *ptr_tbl
+- // 6
+- block.Encrypt(tbl, s[40:48])
+- *(*uint64)(unsafe.Pointer(&d[40])) = *(*uint64)(unsafe.Pointer(&s[40])) ^ *ptr_next
+- // 7
+- block.Encrypt(next, s[48:56])
+- *(*uint64)(unsafe.Pointer(&d[48])) = *(*uint64)(unsafe.Pointer(&s[48])) ^ *ptr_tbl
+- // 8
+- block.Encrypt(tbl, s[56:64])
+- *(*uint64)(unsafe.Pointer(&d[56])) = *(*uint64)(unsafe.Pointer(&s[56])) ^ *ptr_next
+- base += 64
+- }
+-
+- switch left {
+- case 7:
+- block.Encrypt(next, src[base:])
+- *(*uint64)(unsafe.Pointer(&dst[base])) = *(*uint64)(unsafe.Pointer(&src[base])) ^ *(*uint64)(unsafe.Pointer(&tbl[0]))
+- tbl, next = next, tbl
+- base += 8
+- fallthrough
+- case 6:
+- block.Encrypt(next, src[base:])
+- *(*uint64)(unsafe.Pointer(&dst[base])) = *(*uint64)(unsafe.Pointer(&src[base])) ^ *(*uint64)(unsafe.Pointer(&tbl[0]))
+- tbl, next = next, tbl
+- base += 8
+- fallthrough
+- case 5:
+- block.Encrypt(next, src[base:])
+- *(*uint64)(unsafe.Pointer(&dst[base])) = *(*uint64)(unsafe.Pointer(&src[base])) ^ *(*uint64)(unsafe.Pointer(&tbl[0]))
+- tbl, next = next, tbl
+- base += 8
+- fallthrough
+- case 4:
+- block.Encrypt(next, src[base:])
+- *(*uint64)(unsafe.Pointer(&dst[base])) = *(*uint64)(unsafe.Pointer(&src[base])) ^ *(*uint64)(unsafe.Pointer(&tbl[0]))
+- tbl, next = next, tbl
+- base += 8
+- fallthrough
+- case 3:
+- block.Encrypt(next, src[base:])
+- *(*uint64)(unsafe.Pointer(&dst[base])) = *(*uint64)(unsafe.Pointer(&src[base])) ^ *(*uint64)(unsafe.Pointer(&tbl[0]))
+- tbl, next = next, tbl
+- base += 8
+- fallthrough
+- case 2:
+- block.Encrypt(next, src[base:])
+- *(*uint64)(unsafe.Pointer(&dst[base])) = *(*uint64)(unsafe.Pointer(&src[base])) ^ *(*uint64)(unsafe.Pointer(&tbl[0]))
+- tbl, next = next, tbl
+- base += 8
+- fallthrough
+- case 1:
+- block.Encrypt(next, src[base:])
+- *(*uint64)(unsafe.Pointer(&dst[base])) = *(*uint64)(unsafe.Pointer(&src[base])) ^ *(*uint64)(unsafe.Pointer(&tbl[0]))
+- tbl, next = next, tbl
+- base += 8
+- fallthrough
+- case 0:
+- xorBytes(dst[base:], src[base:], tbl)
+- }
+-}
+-
+-func decrypt16(block cipher.Block, dst, src, buf []byte) {
+- tbl := buf[0:16]
+- next := buf[16:32]
+- block.Encrypt(tbl, initialVector)
+- n := len(src) / 16
+- base := 0
+- repeat := n / 8
+- left := n % 8
+- for i := 0; i < repeat; i++ {
+- s := src[base:][0:128]
+- d := dst[base:][0:128]
+- // 1
+- block.Encrypt(next, s[0:16])
+- xor.Bytes16Align(d[0:16], s[0:16], tbl)
+- // 2
+- block.Encrypt(tbl, s[16:32])
+- xor.Bytes16Align(d[16:32], s[16:32], next)
+- // 3
+- block.Encrypt(next, s[32:48])
+- xor.Bytes16Align(d[32:48], s[32:48], tbl)
+- // 4
+- block.Encrypt(tbl, s[48:64])
+- xor.Bytes16Align(d[48:64], s[48:64], next)
+- // 5
+- block.Encrypt(next, s[64:80])
+- xor.Bytes16Align(d[64:80], s[64:80], tbl)
+- // 6
+- block.Encrypt(tbl, s[80:96])
+- xor.Bytes16Align(d[80:96], s[80:96], next)
+- // 7
+- block.Encrypt(next, s[96:112])
+- xor.Bytes16Align(d[96:112], s[96:112], tbl)
+- // 8
+- block.Encrypt(tbl, s[112:128])
+- xor.Bytes16Align(d[112:128], s[112:128], next)
+- base += 128
+- }
+-
+- switch left {
+- case 7:
+- block.Encrypt(next, src[base:])
+- xor.Bytes16Align(dst[base:], src[base:], tbl)
+- tbl, next = next, tbl
+- base += 16
+- fallthrough
+- case 6:
+- block.Encrypt(next, src[base:])
+- xor.Bytes16Align(dst[base:], src[base:], tbl)
+- tbl, next = next, tbl
+- base += 16
+- fallthrough
+- case 5:
+- block.Encrypt(next, src[base:])
+- xor.Bytes16Align(dst[base:], src[base:], tbl)
+- tbl, next = next, tbl
+- base += 16
+- fallthrough
+- case 4:
+- block.Encrypt(next, src[base:])
+- xor.Bytes16Align(dst[base:], src[base:], tbl)
+- tbl, next = next, tbl
+- base += 16
+- fallthrough
+- case 3:
+- block.Encrypt(next, src[base:])
+- xor.Bytes16Align(dst[base:], src[base:], tbl)
+- tbl, next = next, tbl
+- base += 16
+- fallthrough
+- case 2:
+- block.Encrypt(next, src[base:])
+- xor.Bytes16Align(dst[base:], src[base:], tbl)
+- tbl, next = next, tbl
+- base += 16
+- fallthrough
+- case 1:
+- block.Encrypt(next, src[base:])
+- xor.Bytes16Align(dst[base:], src[base:], tbl)
+- tbl, next = next, tbl
+- base += 16
+- fallthrough
+- case 0:
+- xorBytes(dst[base:], src[base:], tbl)
+- }
+-}
+-
+-// per bytes xors
+-func xorBytes(dst, a, b []byte) int {
+- n := len(a)
+- if len(b) < n {
+- n = len(b)
+- }
+- if n == 0 {
+- return 0
+- }
+-
+- for i := 0; i < n; i++ {
+- dst[i] = a[i] ^ b[i]
+- }
+- return n
+-}
+diff --git a/fec.go b/fec.go
+deleted file mode 100644
+index 97cd40b..0000000
+--- a/fec.go
++++ /dev/null
+@@ -1,337 +0,0 @@
+-package kcp
+-
+-import (
+- "encoding/binary"
+- "sync/atomic"
+-
+- "github.com/klauspost/reedsolomon"
+-)
+-
+-const (
+- fecHeaderSize = 6
+- fecHeaderSizePlus2 = fecHeaderSize + 2 // plus 2B data size
+- typeData = 0xf1
+- typeParity = 0xf2
+- fecExpire = 60000
+-)
+-
+-// fecPacket is a decoded FEC packet
+-type fecPacket []byte
+-
+-func (bts fecPacket) seqid() uint32 { return binary.LittleEndian.Uint32(bts) }
+-func (bts fecPacket) flag() uint16 { return binary.LittleEndian.Uint16(bts[4:]) }
+-func (bts fecPacket) data() []byte { return bts[6:] }
+-
+-// fecElement has auxcilliary time field
+-type fecElement struct {
+- fecPacket
+- ts uint32
+-}
+-
+-// fecDecoder for decoding incoming packets
+-type fecDecoder struct {
+- rxlimit int // queue size limit
+- dataShards int
+- parityShards int
+- shardSize int
+- rx []fecElement // ordered receive queue
+-
+- // caches
+- decodeCache [][]byte
+- flagCache []bool
+-
+- // zeros
+- zeros []byte
+-
+- // RS decoder
+- codec reedsolomon.Encoder
+-}
+-
+-func newFECDecoder(rxlimit, dataShards, parityShards int) *fecDecoder {
+- if dataShards <= 0 || parityShards <= 0 {
+- return nil
+- }
+- if rxlimit < dataShards+parityShards {
+- return nil
+- }
+-
+- dec := new(fecDecoder)
+- dec.rxlimit = rxlimit
+- dec.dataShards = dataShards
+- dec.parityShards = parityShards
+- dec.shardSize = dataShards + parityShards
+- codec, err := reedsolomon.New(dataShards, parityShards)
+- if err != nil {
+- return nil
+- }
+- dec.codec = codec
+- dec.decodeCache = make([][]byte, dec.shardSize)
+- dec.flagCache = make([]bool, dec.shardSize)
+- dec.zeros = make([]byte, mtuLimit)
+- return dec
+-}
+-
+-// decode a fec packet
+-func (dec *fecDecoder) decode(in fecPacket) (recovered [][]byte) {
+- // insertion
+- n := len(dec.rx) - 1
+- insertIdx := 0
+- for i := n; i >= 0; i-- {
+- if in.seqid() == dec.rx[i].seqid() { // de-duplicate
+- return nil
+- } else if _itimediff(in.seqid(), dec.rx[i].seqid()) > 0 { // insertion
+- insertIdx = i + 1
+- break
+- }
+- }
+-
+- // make a copy
+- pkt := fecPacket(xmitBuf.Get().([]byte)[:len(in)])
+- copy(pkt, in)
+- elem := fecElement{pkt, currentMs()}
+-
+- // insert into ordered rx queue
+- if insertIdx == n+1 {
+- dec.rx = append(dec.rx, elem)
+- } else {
+- dec.rx = append(dec.rx, fecElement{})
+- copy(dec.rx[insertIdx+1:], dec.rx[insertIdx:]) // shift right
+- dec.rx[insertIdx] = elem
+- }
+-
+- // shard range for current packet
+- shardBegin := pkt.seqid() - pkt.seqid()%uint32(dec.shardSize)
+- shardEnd := shardBegin + uint32(dec.shardSize) - 1
+-
+- // max search range in ordered queue for current shard
+- searchBegin := insertIdx - int(pkt.seqid()%uint32(dec.shardSize))
+- if searchBegin < 0 {
+- searchBegin = 0
+- }
+- searchEnd := searchBegin + dec.shardSize - 1
+- if searchEnd >= len(dec.rx) {
+- searchEnd = len(dec.rx) - 1
+- }
+-
+- // re-construct datashards
+- if searchEnd-searchBegin+1 >= dec.dataShards {
+- var numshard, numDataShard, first, maxlen int
+-
+- // zero caches
+- shards := dec.decodeCache
+- shardsflag := dec.flagCache
+- for k := range dec.decodeCache {
+- shards[k] = nil
+- shardsflag[k] = false
+- }
+-
+- // shard assembly
+- for i := searchBegin; i <= searchEnd; i++ {
+- seqid := dec.rx[i].seqid()
+- if _itimediff(seqid, shardEnd) > 0 {
+- break
+- } else if _itimediff(seqid, shardBegin) >= 0 {
+- shards[seqid%uint32(dec.shardSize)] = dec.rx[i].data()
+- shardsflag[seqid%uint32(dec.shardSize)] = true
+- numshard++
+- if dec.rx[i].flag() == typeData {
+- numDataShard++
+- }
+- if numshard == 1 {
+- first = i
+- }
+- if len(dec.rx[i].data()) > maxlen {
+- maxlen = len(dec.rx[i].data())
+- }
+- }
+- }
+-
+- if numDataShard == dec.dataShards {
+- // case 1: no loss on data shards
+- dec.rx = dec.freeRange(first, numshard, dec.rx)
+- } else if numshard >= dec.dataShards {
+- // case 2: loss on data shards, but it's recoverable from parity shards
+- for k := range shards {
+- if shards[k] != nil {
+- dlen := len(shards[k])
+- shards[k] = shards[k][:maxlen]
+- copy(shards[k][dlen:], dec.zeros)
+- } else if k < dec.dataShards {
+- shards[k] = xmitBuf.Get().([]byte)[:0]
+- }
+- }
+- if err := dec.codec.ReconstructData(shards); err == nil {
+- for k := range shards[:dec.dataShards] {
+- if !shardsflag[k] {
+- // recovered data should be recycled
+- recovered = append(recovered, shards[k])
+- }
+- }
+- }
+- dec.rx = dec.freeRange(first, numshard, dec.rx)
+- }
+- }
+-
+- // keep rxlimit
+- if len(dec.rx) > dec.rxlimit {
+- if dec.rx[0].flag() == typeData { // track the unrecoverable data
+- atomic.AddUint64(&DefaultSnmp.FECShortShards, 1)
+- }
+- dec.rx = dec.freeRange(0, 1, dec.rx)
+- }
+-
+- // timeout policy
+- current := currentMs()
+- numExpired := 0
+- for k := range dec.rx {
+- if _itimediff(current, dec.rx[k].ts) > fecExpire {
+- numExpired++
+- continue
+- }
+- break
+- }
+- if numExpired > 0 {
+- dec.rx = dec.freeRange(0, numExpired, dec.rx)
+- }
+- return
+-}
+-
+-// free a range of fecPacket
+-func (dec *fecDecoder) freeRange(first, n int, q []fecElement) []fecElement {
+- for i := first; i < first+n; i++ { // recycle buffer
+- xmitBuf.Put([]byte(q[i].fecPacket))
+- }
+-
+- if first == 0 && n < cap(q)/2 {
+- return q[n:]
+- }
+- copy(q[first:], q[first+n:])
+- return q[:len(q)-n]
+-}
+-
+-// release all segments back to xmitBuf
+-func (dec *fecDecoder) release() {
+- if n := len(dec.rx); n > 0 {
+- dec.rx = dec.freeRange(0, n, dec.rx)
+- }
+-}
+-
+-type (
+- // fecEncoder for encoding outgoing packets
+- fecEncoder struct {
+- dataShards int
+- parityShards int
+- shardSize int
+- paws uint32 // Protect Against Wrapped Sequence numbers
+- next uint32 // next seqid
+-
+- shardCount int // count the number of datashards collected
+- maxSize int // track maximum data length in datashard
+-
+- headerOffset int // FEC header offset
+- payloadOffset int // FEC payload offset
+-
+- // caches
+- shardCache [][]byte
+- encodeCache [][]byte
+-
+- // zeros
+- zeros []byte
+-
+- // RS encoder
+- codec reedsolomon.Encoder
+- }
+-)
+-
+-func newFECEncoder(dataShards, parityShards, offset int) *fecEncoder {
+- if dataShards <= 0 || parityShards <= 0 {
+- return nil
+- }
+- enc := new(fecEncoder)
+- enc.dataShards = dataShards
+- enc.parityShards = parityShards
+- enc.shardSize = dataShards + parityShards
+- enc.paws = 0xffffffff / uint32(enc.shardSize) * uint32(enc.shardSize)
+- enc.headerOffset = offset
+- enc.payloadOffset = enc.headerOffset + fecHeaderSize
+-
+- codec, err := reedsolomon.New(dataShards, parityShards)
+- if err != nil {
+- return nil
+- }
+- enc.codec = codec
+-
+- // caches
+- enc.encodeCache = make([][]byte, enc.shardSize)
+- enc.shardCache = make([][]byte, enc.shardSize)
+- for k := range enc.shardCache {
+- enc.shardCache[k] = make([]byte, mtuLimit)
+- }
+- enc.zeros = make([]byte, mtuLimit)
+- return enc
+-}
+-
+-// encodes the packet, outputs parity shards if we have collected quorum datashards
+-// notice: the contents of 'ps' will be re-written in successive calling
+-func (enc *fecEncoder) encode(b []byte) (ps [][]byte) {
+- // The header format:
+- // | FEC SEQID(4B) | FEC TYPE(2B) | SIZE (2B) | PAYLOAD(SIZE-2) |
+- // |<-headerOffset |<-payloadOffset
+- enc.markData(b[enc.headerOffset:])
+- binary.LittleEndian.PutUint16(b[enc.payloadOffset:], uint16(len(b[enc.payloadOffset:])))
+-
+- // copy data from payloadOffset to fec shard cache
+- sz := len(b)
+- enc.shardCache[enc.shardCount] = enc.shardCache[enc.shardCount][:sz]
+- copy(enc.shardCache[enc.shardCount][enc.payloadOffset:], b[enc.payloadOffset:])
+- enc.shardCount++
+-
+- // track max datashard length
+- if sz > enc.maxSize {
+- enc.maxSize = sz
+- }
+-
+- // Generation of Reed-Solomon Erasure Code
+- if enc.shardCount == enc.dataShards {
+- // fill '0' into the tail of each datashard
+- for i := 0; i < enc.dataShards; i++ {
+- shard := enc.shardCache[i]
+- slen := len(shard)
+- copy(shard[slen:enc.maxSize], enc.zeros)
+- }
+-
+- // construct equal-sized slice with stripped header
+- cache := enc.encodeCache
+- for k := range cache {
+- cache[k] = enc.shardCache[k][enc.payloadOffset:enc.maxSize]
+- }
+-
+- // encoding
+- if err := enc.codec.Encode(cache); err == nil {
+- ps = enc.shardCache[enc.dataShards:]
+- for k := range ps {
+- enc.markParity(ps[k][enc.headerOffset:])
+- ps[k] = ps[k][:enc.maxSize]
+- }
+- }
+-
+- // counters resetting
+- enc.shardCount = 0
+- enc.maxSize = 0
+- }
+-
+- return
+-}
+-
+-func (enc *fecEncoder) markData(data []byte) {
+- binary.LittleEndian.PutUint32(data, enc.next)
+- binary.LittleEndian.PutUint16(data[4:], typeData)
+- enc.next++
+-}
+-
+-func (enc *fecEncoder) markParity(data []byte) {
+- binary.LittleEndian.PutUint32(data, enc.next)
+- binary.LittleEndian.PutUint16(data[4:], typeParity)
+- // sequence wrap will only happen at parity shard
+- enc.next = (enc.next + 1) % enc.paws
+-}
+diff --git a/removed.go b/removed.go
+new file mode 100644
+index 0000000..5ecf446
+--- /dev/null
++++ b/removed.go
+@@ -0,0 +1,29 @@
++package kcp
++
++// Dummy implementations for types from crypt.go and fec.go, removed to reduce
++// dependencies.
++
++const (
++ fecHeaderSize = 6
++ fecHeaderSizePlus2 = fecHeaderSize + 2
++ typeData = 0xf1
++ typeParity = 0xf2
++)
++
++type (
++ BlockCrypt interface {
++ Encrypt(_, _ []byte)
++ Decrypt(_, _ []byte)
++ }
++ fecDecoder struct{}
++ fecEncoder struct{}
++ fecPacket []byte
++)
++
++func newFECDecoder(rxlimit, dataShards, parityShards int) *fecDecoder { return nil }
++func newFECEncoder(dataShards, parityShards, offset int) *fecEncoder { return nil }
++
++func (_ *fecDecoder) decode(in fecPacket) [][]byte { panic("disabled") }
++func (_ *fecDecoder) release() { panic("disabled") }
++func (_ *fecEncoder) encode(b []byte) [][]byte { panic("disabled") }
++func (_ fecPacket) flag() uint16 { panic("disabled") }
+--
+2.20.1
+
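One consequence of the patch above worth noting: with crypt.go and fec.go
reduced to the panic("disabled") stubs in removed.go, kcp-go has to be
driven with a nil BlockCrypt and zero data/parity shards so those code
paths are never reached. A hedged server-side sketch (pconn is again a
hypothetical net.PacketConn; this is not the snowflake server code):

// Sketch only: accepting inner sessions over an arbitrary PacketConn,
// with the removed crypt/FEC features left disabled.
package sketch

import (
	"io"
	"log"
	"net"

	kcp "github.com/xtaci/kcp-go/v5"
	"github.com/xtaci/smux"
)

func acceptInnerSessions(pconn net.PacketConn, handle func(io.ReadWriteCloser)) error {
	// nil block cipher, 0 data shards, 0 parity shards: the stubbed-out
	// encryption and FEC paths are never exercised.
	ln, err := kcp.ServeConn(nil, 0, 0, pconn)
	if err != nil {
		return err
	}
	for {
		conn, err := ln.AcceptKCP()
		if err != nil {
			return err
		}
		sess, err := smux.Server(conn, smux.DefaultConfig())
		if err != nil {
			log.Print(err)
			continue
		}
		go func() {
			// Each smux stream is an independent logical connection.
			for {
				stream, err := sess.AcceptStream()
				if err != nil {
					return
				}
				go handle(stream)
			}
		}()
	}
}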
diff --git a/projects/smux/config b/projects/smux/config
new file mode 100644
index 0000000..54d2a11
--- /dev/null
+++ b/projects/smux/config
@@ -0,0 +1,16 @@
+version: '[% c("abbrev") %]'
+git_url: https://github.com/xtaci/smux
+git_hash: c6969d8a76874342611f4b544d05b1590f1a76b1 # v1.5.12
+filename: '[% project %]-[% c("version") %]-[% c("var/osname") %]-[% c("var/build_id") %].tar.gz'
+
+build: '[% c("projects/go/var/build_go_lib") %]'
+
+var:
+ container:
+ use_container: 1
+ go_lib: github.com/xtaci/smux
+
+input_files:
+ - project: container-image
+ - name: go
+ project: go
diff --git a/projects/snowflake/build b/projects/snowflake/build
index 1aede45..2724dd9 100644
--- a/projects/snowflake/build
+++ b/projects/snowflake/build
@@ -8,6 +8,8 @@ mkdir -p $PTDIR $DOCSDIR
tar -C /var/tmp/dist -xf [% c('input_files_by_name/uniuri') %]
tar -C /var/tmp/dist -xf [% c('input_files_by_name/goptlib') %]
tar -C /var/tmp/dist -xf [% c('input_files_by_name/pion-webrtc') %]
+tar -C /var/tmp/dist -xf [% c('input_files_by_name/kcp-go') %]
+tar -C /var/tmp/dist -xf [% c('input_files_by_name/smux') %]
mkdir -p /var/tmp/build
tar -C /var/tmp/build -xf [% project %]-[% c('version') %].tar.gz
@@ -18,6 +20,7 @@ ln -sf "$PWD" "$GOPATH/src/git.torproject.org/pluggable-transports/snowflake.git
# Fix gopath location of versioned dependencies
ln -sf "$GOPATH/src/github.com/pion/webrtc" "$GOPATH/src/github.com/pion/webrtc/v2"
ln -sf "$GOPATH/src/github.com/pion/sdp" "$GOPATH/src/github.com/pion/sdp/v2"
+ln -sf "$GOPATH/src/github.com/xtaci/kcp-go" "$GOPATH/src/github.com/xtaci/kcp-go/v5"
cd client
go build -ldflags '-s'
diff --git a/projects/snowflake/config b/projects/snowflake/config
index b81f1f5..efd0707 100644
--- a/projects/snowflake/config
+++ b/projects/snowflake/config
@@ -1,7 +1,7 @@
# vim: filetype=yaml sw=2
version: '[% c("abbrev") %]'
git_url: https://git.torproject.org/pluggable-transports/snowflake.git
-git_hash: ea01bf41c3011590938b079ed96c7b35cb40588b
+git_hash: 7043a055f9fb0680281ecffd7d458a43f2ce65b5
filename: '[% project %]-[% c("version") %]-[% c("var/osname") %]-[% c("var/build_id") %].tar.gz'
var:
@@ -18,3 +18,7 @@ input_files:
project: goptlib
- name: pion-webrtc
project: pion-webrtc
+ - name: kcp-go
+ project: kcp-go
+ - name: smux
+ project: smux