/*
* Copyright (c) 2016, Psiphon Inc.
* All rights reserved.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
// Package osl implements the Obfuscated Server List (OSL) mechanism. This
// mechanism is a method of distributing server lists only to clients that
// demonstrate certain behavioral traits. Clients are seeded with Server
// List Obfuscation Keys (SLOKs) as they meet the configured criteria. These
// keys are stored and later combined to assemble keys to decrypt out-of-band
// distributed OSL files that contain server lists.
//
// This package contains the core routines used in psiphond (to track client
// traits and issue SLOKs), clients (to manage SLOKs and decrypt OSLs), and
// automation (to create OSLs for distribution).
package osl
import (
"crypto/aes"
"crypto/cipher"
"crypto/hmac"
"crypto/md5"
"crypto/sha256"
"encoding/base64"
"encoding/binary"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"net"
"net/url"
"path"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/crypto/nacl/secretbox"
"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/sss"
)
const (
KEY_LENGTH_BYTES = 32
REGISTRY_FILENAME = "osl-registry"
OSL_FILENAME_FORMAT = "osl-%s"
)
// Config is an OSL configuration, which consists of a list of schemes.
// The Reload function supports hot reloading of rules data while the
// process is running.
type Config struct {
common.ReloadableFile
Schemes []*Scheme
}
// Scheme defines an OSL seeding and distribution strategy. SLOKs to
// decrypt OSLs are issued based on client network activity -- defined
// in the SeedSpecs -- and time. OSLs are created for periods of time
// and can be decrypted by clients that are seeded with a sufficient
// selection of SLOKs for that time period. Distribution of server
// entries to OSLs is delegated to automation.
type Scheme struct {
// Epoch is the start time of the scheme, the start time of the
// first OSL and when SLOKs will first be issued. It must be
// specified in UTC and must be a multiple of SeedPeriodNanoseconds.
Epoch string
// Regions is a list of client country codes this scheme applies to.
// If empty, the scheme applies to all regions.
Regions []string
// PropagationChannelIDs is a list of client propagation channel IDs
// this scheme applies to. Propagation channel IDs are an input
// to SLOK key derivation.
PropagationChannelIDs []string
// MasterKey is the base random key used for SLOK key derivation. It
// must be unique for each scheme. It must be 32 random bytes, base64
// encoded.
MasterKey []byte
// SeedSpecs is the set of different client network activity patterns
// that will result in issuing SLOKs. For a given time period, a distinct
// SLOK is issued for each SeedSpec.
// Duplicate subnets may appear in multiple SeedSpecs.
SeedSpecs []*SeedSpec
// SeedSpecThreshold is the threshold scheme for combining SLOKs to
// decrypt an OSL. For any fixed time period, at least K (threshold) of
// N (total) SLOKs from the N SeedSpecs must be seeded for a client to be
// able to reassemble the OSL key.
// Limitation: thresholds must be at least 2.
SeedSpecThreshold int
// SeedPeriodNanoseconds is the time period granularity of SLOKs.
// New SLOKs are issued every SeedPeriodNanoseconds. Client progress
// towards activity levels is reset at the end of each period.
SeedPeriodNanoseconds int64
// KeySplits is the time period threshold scheme layered on top of the
// SeedSpecThreshold scheme for combining SLOKs to decrypt an OSL.
// There must be at least one level. For one level, any K (threshold) of
// N (total) SeedSpec SLOK groups must be sufficiently seeded for a client
// to be able to reassemble the OSL key. When an additional level is
// specified, then K' of N' groups of N of K SeedSpec SLOK groups must be
// sufficiently seeded. And so on. The first level in the list is the
// lowest level. The time period for OSLs is determined by the totals in
// the KeySplits.
//
// Example:
//
// SeedSpecs = <3 specs>
// SeedSpecThreshold = 2
// SeedPeriodNanoseconds = 100,000,000 = 100 milliseconds
// SeedPeriodKeySplits = [{10, 7}, {60, 5}]
//
// In this scheme, up to 3 distinct SLOKs, one per spec, are issued
// every 100 milliseconds.
//
// Distinct OSLs are paved for every minute (60 seconds). Each OSL
// key is split such that, for those 60 seconds, a client must seed
// 2/3 spec SLOKs for 7 of 10 consecutive 100 ms time periods within
// a second, for any 5 of 60 seconds within the minute.
//
SeedPeriodKeySplits []KeySplit
// The following fields are ephemeral state.
epoch time.Time
subnetLookups []common.SubnetLookup
derivedSLOKCacheMutex sync.RWMutex
derivedSLOKCache map[slokReference]*SLOK
}
// SeedSpec defines a client traffic pattern that results in a seeded SLOK.
// For each time period, a unique SLOK is issued to a client that meets the
// traffic levels specified in Targets. All upstream port forward traffic to
// UpstreamSubnets is counted towards the targets.
//
// ID is a SLOK key derivation component and must be 32 random bytes, base64
// encoded. UpstreamSubnets is a list of CIDRs. Description is not used; it's
// for JSON config file comments.
type SeedSpec struct {
Description string
ID []byte
UpstreamSubnets []string
Targets TrafficValues
}
// TrafficValues defines a client traffic level that seeds a SLOK.
// BytesRead and BytesWritten are the minimum bytes transferred counts to
// seed a SLOK. Both UDP and TCP data will be counted towards these totals.
// PortForwardDurationNanoseconds is the duration that a TCP or UDP port
// forward is active (not connected, in the UDP case). All threshold
// settings must be met to seed a SLOK; any threshold may be set to 0 to
// be trivially satisfied.
type TrafficValues struct {
BytesRead int64
BytesWritten int64
PortForwardDurationNanoseconds int64
}
// KeySplit defines a secret key splitting scheme where the secret is split
// into N (total) shares and any K (threshold) of N shares must be known
// to reconstruct the split secret.
type KeySplit struct {
Total int
Threshold int
}
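// For orientation, the following is a minimal illustrative sketch of a JSON
// configuration consumed by LoadConfig, using the example values from the
// Scheme documentation above (3 seed specs, threshold 2, 100 millisecond
// periods, key splits [{10, 7}, {60, 5}]). All IDs, keys, subnets, and target
// values shown are hypothetical placeholders, not working values:
//
//   {
//     "Schemes": [
//       {
//         "Epoch": "2016-01-01T00:00:00Z",
//         "Regions": [],
//         "PropagationChannelIDs": ["<hypothetical propagation channel ID>"],
//         "MasterKey": "<32 random bytes, base64 encoded>",
//         "SeedSpecs": [
//           {
//             "Description": "example seed spec 1",
//             "ID": "<32 random bytes, base64 encoded>",
//             "UpstreamSubnets": ["192.0.2.0/24"],
//             "Targets": {
//               "BytesRead": 1048576,
//               "BytesWritten": 1048576,
//               "PortForwardDurationNanoseconds": 60000000000
//             }
//           },
//           ... two more SeedSpec objects ...
//         ],
//         "SeedSpecThreshold": 2,
//         "SeedPeriodNanoseconds": 100000000,
//         "SeedPeriodKeySplits": [
//           {"Total": 10, "Threshold": 7},
//           {"Total": 60, "Threshold": 5}
//         ]
//       }
//     ]
//   }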
// ClientSeedState tracks the progress of a client towards seeding SLOKs
// across all schemes the client qualifies for.
type ClientSeedState struct {
propagationChannelID string
seedProgress []*ClientSeedProgress
mutex sync.Mutex
signalIssueSLOKs chan struct{}
issuedSLOKs map[string]*SLOK
payloadSLOKs []*SLOK
}
// ClientSeedProgress tracks client progress towards seeding SLOKs for
// a particular scheme.
type ClientSeedProgress struct {
// Note: 64-bit ints used with atomic operations are placed
// at the start of struct to ensure 64-bit alignment.
// (https://golang.org/pkg/sync/atomic/#pkg-note-BUG)
progressSLOKTime int64
scheme *Scheme
trafficProgress []*TrafficValues
}
// ClientSeedPortForward maps a client port forward, which is relaying
// traffic to a specific upstream address, to all seed state progress
// counters for SeedSpecs with subnets containing the upstream address.
// As traffic is relayed through the port forwards, the bytes transferred
// and duration count towards the progress of these SeedSpecs and
// associated SLOKs.
type ClientSeedPortForward struct {
state *ClientSeedState
progressReferences []progressReference
}
// progressReference points to a particular ClientSeedProgress and
// TrafficValues to update with traffic events for a
// ClientSeedPortForward.
type progressReference struct {
seedProgressIndex int
trafficProgressIndex int
}
// slokReference uniquely identifies a SLOK by specifying all the fields
// used to derive the SLOK secret key and ID.
// Note: SeedSpecID is not a []byte as slokReference is used as a map key.
type slokReference struct {
PropagationChannelID string
SeedSpecID string
Time time.Time
}
// SLOK is a seeded SLOK issued to a client. The client will store the
// SLOK in its local database; look it up by ID when checking which OSLs it
// can reassemble keys for; and use the key material to reassemble OSL
// file keys.
type SLOK struct {
ID []byte
Key []byte
}
// SeedPayload is the list of seeded SLOKs sent to a client.
type SeedPayload struct {
SLOKs []*SLOK
}
// NewConfig initializes a Config with the settings in the specified
// file.
func NewConfig(filename string) (*Config, error) {
config := &Config{}
config.ReloadableFile = common.NewReloadableFile(
filename,
func(fileContent []byte) error {
newConfig, err := LoadConfig(fileContent)
if err != nil {
return common.ContextError(err)
}
// Modify actual traffic rules only after validation
config.Schemes = newConfig.Schemes
return nil
})
_, err := config.Reload()
if err != nil {
return nil, common.ContextError(err)
}
return config, nil
}
// LoadConfig loads, validates, and initializes a JSON encoded OSL
// configuration.
func LoadConfig(configJSON []byte) (*Config, error) {
var config Config
err := json.Unmarshal(configJSON, &config)
if err != nil {
return nil, common.ContextError(err)
}
var previousEpoch time.Time
for _, scheme := range config.Schemes {
epoch, err := time.Parse(time.RFC3339, scheme.Epoch)
if err != nil {
return nil, common.ContextError(fmt.Errorf("invalid epoch format: %s", err))
}
if epoch.UTC() != epoch {
return nil, common.ContextError(errors.New("invalid epoch timezone"))
}
if epoch.Round(time.Duration(scheme.SeedPeriodNanoseconds)) != epoch {
return nil, common.ContextError(errors.New("invalid epoch period"))
}
if epoch.Before(previousEpoch) {
return nil, common.ContextError(errors.New("invalid epoch order"))
}
previousEpoch = epoch
scheme.epoch = epoch
scheme.subnetLookups = make([]common.SubnetLookup, len(scheme.SeedSpecs))
scheme.derivedSLOKCache = make(map[slokReference]*SLOK)
if len(scheme.MasterKey) != KEY_LENGTH_BYTES {
return nil, common.ContextError(errors.New("invalid master key"))
}
for index, seedSpec := range scheme.SeedSpecs {
if len(seedSpec.ID) != KEY_LENGTH_BYTES {
return nil, common.ContextError(errors.New("invalid seed spec ID"))
}
// TODO: check that subnets do not overlap, as required by SubnetLookup
subnetLookup, err := common.NewSubnetLookup(seedSpec.UpstreamSubnets)
if err != nil {
return nil, common.ContextError(fmt.Errorf("invalid upstream subnets: %s", err))
}
scheme.subnetLookups[index] = subnetLookup
}
if !isValidShamirSplit(len(scheme.SeedSpecs), scheme.SeedSpecThreshold) {
return nil, common.ContextError(errors.New("invalid seed spec key split"))
}
if len(scheme.SeedPeriodKeySplits) < 1 {
return nil, common.ContextError(errors.New("invalid seed period key split count"))
}
for _, keySplit := range scheme.SeedPeriodKeySplits {
if !isValidShamirSplit(keySplit.Total, keySplit.Threshold) {
return nil, common.ContextError(errors.New("invalid seed period key split"))
}
}
}
return &config, nil
}
// NewClientSeedState creates a new client seed state to track
// client progress towards seeding SLOKs. psiphond maintains one
// ClientSeedState for each connected client.
//
// A signal is sent on signalIssueSLOKs when sufficient progress
// has been made that a new SLOK *may* be issued. psiphond will
// receive the signal and then call GetClientSeedPayload/IssueSLOKs
// to issue SLOKs, generate payload, and send to the client. The
// sender will not block sending to signalIssueSLOKs; the channel
// should be appropriately buffered.
func (config *Config) NewClientSeedState(
clientRegion, propagationChannelID string,
signalIssueSLOKs chan struct{}) *ClientSeedState {
config.ReloadableFile.RLock()
defer config.ReloadableFile.RUnlock()
state := &ClientSeedState{
propagationChannelID: propagationChannelID,
signalIssueSLOKs: signalIssueSLOKs,
issuedSLOKs: make(map[string]*SLOK),
payloadSLOKs: nil,
}
for _, scheme := range config.Schemes {
// All matching schemes are selected.
// Note: this implementation assumes a few simple schemes. For more
// schemes with many propagation channel IDs or region filters, use
// maps for more efficient lookup.
if scheme.epoch.Before(time.Now().UTC()) &&
common.Contains(scheme.PropagationChannelIDs, propagationChannelID) &&
(len(scheme.Regions) == 0 || common.Contains(scheme.Regions, clientRegion)) {
// Empty progress is initialized up front for all seed specs. Once
// created, the progress structure is read-only (the slice, not the
// TrafficValue fields); this permits lock-free operation.
trafficProgress := make([]*TrafficValues, len(scheme.SeedSpecs))
for index := 0; index < len(scheme.SeedSpecs); index++ {
trafficProgress[index] = &TrafficValues{}
}
seedProgress := &ClientSeedProgress{
scheme: scheme,
progressSLOKTime: getSLOKTime(scheme.SeedPeriodNanoseconds),
trafficProgress: trafficProgress,
}
state.seedProgress = append(state.seedProgress, seedProgress)
}
}
return state
}
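// exampleNewClientSeedState is an illustrative sketch, not part of the
// original API, showing how a server might create per-client seed state.
// The signal channel is buffered because sendIssueSLOKsSignal performs a
// non-blocking send and would otherwise drop the signal.
func exampleNewClientSeedState(
    config *Config,
    clientRegion, propagationChannelID string) (*ClientSeedState, chan struct{}) {

    signalIssueSLOKs := make(chan struct{}, 1)
    state := config.NewClientSeedState(
        clientRegion, propagationChannelID, signalIssueSLOKs)
    return state, signalIssueSLOKs
}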
// Hibernate clears references to short-lived objects (currently,
// signalIssueSLOKs) so that a ClientSeedState can be stored for
// later resumption without blocking garbage collection of the
// short-lived objects.
//
// The ClientSeedState will still hold references to its Config;
// the caller is responsible for discarding hibernated seed states
// when the config changes.
//
// The caller should ensure that all ClientSeedPortForwards
// associated with this ClientSeedState are closed before
// hibernation.
func (state *ClientSeedState) Hibernate() {
state.mutex.Lock()
defer state.mutex.Unlock()
state.signalIssueSLOKs = nil
}
// Resume resumes a hibernated ClientSeedState by resetting the required
// objects (currently, signalIssueSLOKs) cleared by Hibernate.
func (state *ClientSeedState) Resume(
signalIssueSLOKs chan struct{}) {
state.mutex.Lock()
defer state.mutex.Unlock()
state.signalIssueSLOKs = signalIssueSLOKs
}
// NewClientSeedPortForward creates a new client port forward
// traffic progress tracker. Port forward progress reported to the
// ClientSeedPortForward is added to seed state progress for all
// seed specs containing upstreamIPAddress in their subnets.
// The return value will be nil when activity for upstreamIPAddress
// does not count towards any progress.
// NewClientSeedPortForward may be invoked concurrently by many
// psiphond port forward establishment goroutines.
func (state *ClientSeedState) NewClientSeedPortForward(
upstreamIPAddress net.IP) *ClientSeedPortForward {
// Concurrency: access to ClientSeedState is unsynchronized
// but references only read-only fields.
if len(state.seedProgress) == 0 {
return nil
}
var progressReferences []progressReference
// Determine which seed spec subnets contain upstreamIPAddress
// and point to the progress for each. When progress is reported,
// it is added directly to all of these TrafficValues instances.
// Assumes seedProgress.trafficProgress entries correspond 1-to-1 with
// seedProgress.scheme.subnetLookups entries.
// Note: this implementation assumes a small number of schemes and
// seed specs. For larger numbers, instead of N SubnetLookups, create
// a single SubnetLookup which returns, for a given IP address, all
// matching subnets and associated seed specs.
for seedProgressIndex, seedProgress := range state.seedProgress {
for trafficProgressIndex, subnetLookup := range seedProgress.scheme.subnetLookups {
if subnetLookup.ContainsIPAddress(upstreamIPAddress) {
progressReferences = append(
progressReferences,
progressReference{
seedProgressIndex: seedProgressIndex,
trafficProgressIndex: trafficProgressIndex,
})
}
}
}
if progressReferences == nil {
return nil
}
return &ClientSeedPortForward{
state: state,
progressReferences: progressReferences,
}
}
func (state *ClientSeedState) sendIssueSLOKsSignal() {
state.mutex.Lock()
defer state.mutex.Unlock()
if state.signalIssueSLOKs != nil {
select {
case state.signalIssueSLOKs <- struct{}{}:
default:
}
}
}
// UpdateProgress adds port forward bytes transferred and duration to
// all seed spec progresses associated with the port forward.
// If UpdateProgress is invoked after the SLOK time period has rolled
// over, any pending seeded SLOKs are issued and all progress is reset.
// UpdateProgress may be invoked concurrently by many psiphond port
// relay goroutines. The implementation of UpdateProgress prioritizes
// not blocking port forward relaying; a consequence of this lock-free
// design is that progress reported at the exact time of SLOK time period
// rollover may be dropped.
func (portForward *ClientSeedPortForward) UpdateProgress(
bytesRead, bytesWritten int64, durationNanoseconds int64) {
// Concurrency: non-blocking -- access to ClientSeedState is unsynchronized
// and limited to read-only fields, atomics, or channels, except in the case
// of a time period rollover, in which case a mutex is acquired.
for _, progressReference := range portForward.progressReferences {
seedProgress := portForward.state.seedProgress[progressReference.seedProgressIndex]
trafficProgress := seedProgress.trafficProgress[progressReference.trafficProgressIndex]
slokTime := getSLOKTime(seedProgress.scheme.SeedPeriodNanoseconds)
// If the SLOK time period has changed since progress was last recorded,
// call issueSLOKs which will issue any SLOKs for that past time period
// and then clear all progress. Progress will then be recorded for the
// current time period.
// As it acquires the state mutex, issueSLOKs may stall other port
// forwards for this client. The delay is minimized by SLOK caching,
// which avoids redundant crypto operations.
if slokTime != atomic.LoadInt64(&seedProgress.progressSLOKTime) {
portForward.state.mutex.Lock()
portForward.state.issueSLOKs()
portForward.state.mutex.Unlock()
// Call to issueSLOKs may have issued new SLOKs. Note that
// this will only happen if the time period rolls over with
// sufficient progress pending while the signalIssueSLOKs
// receiver did not call IssueSLOKs soon enough.
portForward.state.sendIssueSLOKsSignal()
}
// Add directly to the permanent TrafficValues progress accumulators
// for the state's seed specs. Concurrently, other port forwards may
// be adding to the same accumulators. Also concurrently, another
// goroutine may be invoking issueSLOKs, which zeros all the accumulators.
// As a consequence, progress may be dropped at the exact time of
// time period rollover.
seedSpec := seedProgress.scheme.SeedSpecs[progressReference.trafficProgressIndex]
alreadyExceedsTargets := trafficProgress.exceeds(&seedSpec.Targets)
atomic.AddInt64(&trafficProgress.BytesRead, bytesRead)
atomic.AddInt64(&trafficProgress.BytesWritten, bytesWritten)
atomic.AddInt64(&trafficProgress.PortForwardDurationNanoseconds, durationNanoseconds)
// With the target newly met for a SeedSpec, a new
// SLOK *may* be issued.
if !alreadyExceedsTargets && trafficProgress.exceeds(&seedSpec.Targets) {
portForward.state.sendIssueSLOKsSignal()
}
}
}
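// exampleTrackPortForward is an illustrative sketch, not part of the original
// API, showing how a port forward relay might report traffic towards SLOK
// progress. The nil check matters: NewClientSeedPortForward returns nil when
// the upstream address is not contained in any seed spec subnet.
func exampleTrackPortForward(
    state *ClientSeedState,
    upstreamIPAddress net.IP,
    bytesRead, bytesWritten, durationNanoseconds int64) {

    portForward := state.NewClientSeedPortForward(upstreamIPAddress)
    if portForward == nil {
        // Traffic to this upstream address counts towards no seed spec.
        return
    }
    // In practice this is called repeatedly as the relay transfers data.
    portForward.UpdateProgress(bytesRead, bytesWritten, durationNanoseconds)
}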
func (lhs *TrafficValues) exceeds(rhs *TrafficValues) bool {
return atomic.LoadInt64(&lhs.BytesRead) >= atomic.LoadInt64(&rhs.BytesRead) &&
atomic.LoadInt64(&lhs.BytesWritten) >= atomic.LoadInt64(&rhs.BytesWritten) &&
atomic.LoadInt64(&lhs.PortForwardDurationNanoseconds) >=
atomic.LoadInt64(&rhs.PortForwardDurationNanoseconds)
}
// issueSLOKs checks client progress against each candidate seed spec
// and seeds SLOKs when the client traffic levels are achieved. After
// checking progress, and if the SLOK time period has changed since
// progress was last recorded, progress is reset. Partial, insufficient
// progress is intentionally dropped when the time period rolls over.
// Derived SLOKs are cached to avoid redundant CPU intensive operations.
// All issued SLOKs are retained in the client state for the duration
// of the client's session.
func (state *ClientSeedState) issueSLOKs() {
// Concurrency: the caller must lock state.mutex.
if len(state.seedProgress) == 0 {
return
}
for _, seedProgress := range state.seedProgress {
progressSLOKTime := time.Unix(0, seedProgress.progressSLOKTime)
for index, trafficProgress := range seedProgress.trafficProgress {
seedSpec := seedProgress.scheme.SeedSpecs[index]
if trafficProgress.exceeds(&seedSpec.Targets) {
ref := &slokReference{
PropagationChannelID: state.propagationChannelID,
SeedSpecID: string(seedSpec.ID),
Time: progressSLOKTime,
}
seedProgress.scheme.derivedSLOKCacheMutex.RLock()
slok, ok := seedProgress.scheme.derivedSLOKCache[*ref]
seedProgress.scheme.derivedSLOKCacheMutex.RUnlock()
if !ok {
slok = seedProgress.scheme.deriveSLOK(ref)
seedProgress.scheme.derivedSLOKCacheMutex.Lock()
seedProgress.scheme.derivedSLOKCache[*ref] = slok
seedProgress.scheme.derivedSLOKCacheMutex.Unlock()
}
// Previously issued SLOKs are not re-added to
// the payload.
if state.issuedSLOKs[string(slok.ID)] == nil {
state.issuedSLOKs[string(slok.ID)] = slok
state.payloadSLOKs = append(state.payloadSLOKs, slok)
}
}
}
slokTime := getSLOKTime(seedProgress.scheme.SeedPeriodNanoseconds)
if slokTime != atomic.LoadInt64(&seedProgress.progressSLOKTime) {
atomic.StoreInt64(&seedProgress.progressSLOKTime, slokTime)
// The progress structure is not reset or modified; instead
// the mapped accumulator values are zeroed. Concurrently, port
// forward relay goroutines continue to add to these accumulators.
for _, trafficProgress := range seedProgress.trafficProgress {
atomic.StoreInt64(&trafficProgress.BytesRead, 0)
atomic.StoreInt64(&trafficProgress.BytesWritten, 0)
atomic.StoreInt64(&trafficProgress.PortForwardDurationNanoseconds, 0)
}
}
}
}
func getSLOKTime(seedPeriodNanoseconds int64) int64 {
return time.Now().UTC().Truncate(time.Duration(seedPeriodNanoseconds)).UnixNano()
}
// GetSeedPayload issues any pending SLOKs and returns the accumulated
// SLOKs for a given client. psiphond calls this when it receives
// signalIssueSLOKs, which is the trigger to check for new SLOKs.
// Note: caller must not modify the SLOKs in SeedPayload.SLOKs
// as these are shared data.
func (state *ClientSeedState) GetSeedPayload() *SeedPayload {
state.mutex.Lock()
defer state.mutex.Unlock()
if len(state.seedProgress) == 0 {
return &SeedPayload{}
}
state.issueSLOKs()
sloks := make([]*SLOK, len(state.payloadSLOKs))
for index, slok := range state.payloadSLOKs {
sloks[index] = slok
}
return &SeedPayload{
SLOKs: sloks,
}
}
// ClearSeedPayload resets the accumulated SLOK payload (but not SLOK
// progress). psiphond calls this after the client has acknowledged
// receipt of a payload.
func (state *ClientSeedState) ClearSeedPayload() {
state.mutex.Lock()
defer state.mutex.Unlock()
state.payloadSLOKs = nil
}
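// exampleDeliverSeedPayload is an illustrative sketch, not part of the
// original API, of the server-side delivery loop: when signalIssueSLOKs
// fires, fetch the accumulated payload, send it to the client via the
// hypothetical sendToClient callback, and clear the payload once the client
// acknowledges receipt.
func exampleDeliverSeedPayload(
    state *ClientSeedState,
    signalIssueSLOKs <-chan struct{},
    sendToClient func(*SeedPayload) bool) {

    for range signalIssueSLOKs {
        payload := state.GetSeedPayload()
        if len(payload.SLOKs) == 0 {
            continue
        }
        // sendToClient is assumed to return true once the client has
        // acknowledged receipt of the payload.
        if sendToClient(payload) {
            state.ClearSeedPayload()
        }
    }
}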
// deriveSLOK produces SLOK secret keys and IDs using HKDF-Expand
// defined in https://tools.ietf.org/html/rfc5869.
func (scheme *Scheme) deriveSLOK(ref *slokReference) *SLOK {
timeBytes := make([]byte, 8)
binary.LittleEndian.PutUint64(timeBytes, uint64(ref.Time.UnixNano()))
key := deriveKeyHKDF(
scheme.MasterKey,
[]byte(ref.PropagationChannelID),
[]byte(ref.SeedSpecID),
timeBytes)
// TODO: is ID derivation cryptographically sound?
id := deriveKeyHKDF(
scheme.MasterKey,
key)
return &SLOK{
ID: id,
Key: key,
}
}
// GetOSLDuration returns the total time duration of an OSL,
// which is a function of the scheme's SeedPeriodNanoseconds,
// the duration of a single SLOK, and the scheme's SeedPeriodKeySplits,
// the number of SLOKs associated with an OSL.
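// For example, with the scheme documented above (SeedPeriodNanoseconds of
// 100 milliseconds and SeedPeriodKeySplits of [{10, 7}, {60, 5}]), the OSL
// duration is 100 ms * 10 * 60 = 60 seconds, so one OSL is paved per minute.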
func (scheme *Scheme) GetOSLDuration() time.Duration {
slokTimePeriodsPerOSL := 1
for _, keySplit := range scheme.SeedPeriodKeySplits {
slokTimePeriodsPerOSL *= keySplit.Total
}
return time.Duration(
int64(slokTimePeriodsPerOSL) * scheme.SeedPeriodNanoseconds)
}
// PaveFile describes an OSL data file to be paved to an out-of-band
// distribution drop site. There are two types of files: a registry,
// which describes how to assemble keys for OSLs, and the encrypted
// OSL files.
type PaveFile struct {
Name string
Contents []byte
}
// Registry describes a set of OSL files.
type Registry struct {
FileSpecs []*OSLFileSpec
}
// An OSLFileSpec includes an ID which is used to reference the
// OSL file and describes the key splits used to divide the OSL
// file key along with the SLOKs required to reassemble those keys.
//
// The MD5Sum field is a checksum of the contents of the OSL file
// to be used to skip redownloading previously downloaded files.
// MD5 is not cryptographically secure and this checksum is not
// relied upon for OSL verification. MD5 is used for compatibility
// with out-of-band distribution hosts.
type OSLFileSpec struct {
ID []byte
KeyShares *KeyShares
MD5Sum []byte
}
// KeyShares is a tree data structure which describes the
// key splits used to divide a secret key. BoxedShares are encrypted
// shares of the key, and #Threshold amount of decrypted BoxedShares
// are required to reconstruct the secret key. The keys for BoxedShares
// are either SLOKs (referenced by SLOK ID) or random keys that are
// themselves split as described in child KeyShares.
type KeyShares struct {
Threshold int
BoxedShares [][]byte
SLOKIDs [][]byte
KeyShares []*KeyShares
}
type PaveLogInfo struct {
FileName string
SchemeIndex int
PropagationChannelID string
OSLID string
OSLTime time.Time
OSLDuration time.Duration
ServerEntryCount int
}
// Pave creates the full set of OSL files, for all schemes in the
// configuration, to be dropped in an out-of-band distribution site.
// Only OSLs for the propagation channel ID associated with the
// distribution site are paved. This function is used by automation.
//
// The Name component of each file relates to the values returned by
// the client functions GetRegistryURL and GetOSLFileURL.
//
// Pave returns a pave file for the entire registry of all OSLs from
// epoch to endTime, and a pave file for each OSL. paveServerEntries is
// a map from hex-encoded OSL IDs to server entries to pave into that OSL.
// When entries are found, the OSL will contain those entries, newline
// separated. Otherwise the OSL will still be issued, but will be empty (unless
// the scheme is in omitEmptyOSLsSchemes).
//
// As OSLs outside the epoch-endTime range will no longer appear in
// the registry, Pave is intended to be used to create the full set
// of OSLs for a distribution site; i.e., not incrementally.
//
// Automation is responsible for consistently distributing server entries
// to OSLs in the case where OSLs are repaved in subsequent calls.
func (config *Config) Pave(
endTime time.Time,
propagationChannelID string,
signingPublicKey string,
signingPrivateKey string,
paveServerEntries map[string][]string,
omitMD5SumsSchemes []int,
omitEmptyOSLsSchemes []int,
logCallback func(*PaveLogInfo)) ([]*PaveFile, error) {
config.ReloadableFile.RLock()
defer config.ReloadableFile.RUnlock()
var paveFiles []*PaveFile
registry := &Registry{}
for schemeIndex, scheme := range config.Schemes {
if common.Contains(scheme.PropagationChannelIDs, propagationChannelID) {
omitMD5Sums := common.ContainsInt(omitMD5SumsSchemes, schemeIndex)
omitEmptyOSLs := common.ContainsInt(omitEmptyOSLsSchemes, schemeIndex)
oslDuration := scheme.GetOSLDuration()
oslTime := scheme.epoch
for !oslTime.After(endTime) {
firstSLOKTime := oslTime
fileKey, fileSpec, err := makeOSLFileSpec(
scheme, propagationChannelID, firstSLOKTime)
if err != nil {
return nil, common.ContextError(err)
}
hexEncodedOSLID := hex.EncodeToString(fileSpec.ID)
serverEntryCount := len(paveServerEntries[hexEncodedOSLID])
if serverEntryCount > 0 || !omitEmptyOSLs {
registry.FileSpecs = append(registry.FileSpecs, fileSpec)
// serverEntries will be "" when nothing is found in paveServerEntries
serverEntries := strings.Join(paveServerEntries[hexEncodedOSLID], "\n")
serverEntriesPackage, err := common.WriteAuthenticatedDataPackage(
serverEntries,
signingPublicKey,
signingPrivateKey)
if err != nil {
return nil, common.ContextError(err)
}
boxedServerEntries, err := box(fileKey, serverEntriesPackage)
if err != nil {
return nil, common.ContextError(err)
}
if !omitMD5Sums {
md5sum := md5.Sum(boxedServerEntries)
fileSpec.MD5Sum = md5sum[:]
}
fileName := fmt.Sprintf(
OSL_FILENAME_FORMAT, hexEncodedOSLID)
paveFiles = append(paveFiles, &PaveFile{
Name: fileName,
Contents: boxedServerEntries,
})
if logCallback != nil {
logCallback(&PaveLogInfo{
FileName: fileName,
SchemeIndex: schemeIndex,
PropagationChannelID: propagationChannelID,
OSLID: hexEncodedOSLID,
OSLTime: oslTime,
OSLDuration: oslDuration,
ServerEntryCount: serverEntryCount,
})
}
}
oslTime = oslTime.Add(oslDuration)
}
}
}
registryJSON, err := json.Marshal(registry)
if err != nil {
return nil, common.ContextError(err)
}
registryPackage, err := common.WriteAuthenticatedDataPackage(
base64.StdEncoding.EncodeToString(registryJSON),
signingPublicKey,
signingPrivateKey)
if err != nil {
return nil, common.ContextError(err)
}
paveFiles = append(paveFiles, &PaveFile{
Name: REGISTRY_FILENAME,
Contents: registryPackage,
})
return paveFiles, nil
}
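// examplePave is an illustrative sketch, not part of the original API, of how
// automation might pave the full OSL file set for one distribution site. The
// signing keys and the map of hex-encoded OSL IDs to server entries are
// hypothetical inputs supplied by automation; each returned PaveFile is then
// written to the drop site under its Name with its Contents.
func examplePave(
    config *Config,
    propagationChannelID, signingPublicKey, signingPrivateKey string,
    paveServerEntries map[string][]string) ([]*PaveFile, error) {

    return config.Pave(
        time.Now().UTC(), // pave all OSLs from each scheme's epoch to now
        propagationChannelID,
        signingPublicKey,
        signingPrivateKey,
        paveServerEntries,
        nil, // omitMD5SumsSchemes
        nil, // omitEmptyOSLsSchemes
        nil) // logCallback
}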
// CurrentOSLIDs returns a mapping from each propagation channel ID in the
// specified scheme to the corresponding current time period, hex-encoded OSL ID.
func (config *Config) CurrentOSLIDs(schemeIndex int) (map[string]string, error) {
config.ReloadableFile.RLock()
defer config.ReloadableFile.RUnlock()
if schemeIndex < 0 || schemeIndex >= len(config.Schemes) {
return nil, common.ContextError(errors.New("invalid scheme index"))
}
scheme := config.Schemes[schemeIndex]
now := time.Now().UTC()
oslDuration := scheme.GetOSLDuration()
oslTime := scheme.epoch.Add((now.Sub(scheme.epoch) / oslDuration) * oslDuration)
OSLIDs := make(map[string]string)
for _, propagationChannelID := range scheme.PropagationChannelIDs {
_, fileSpec, err := makeOSLFileSpec(scheme, propagationChannelID, oslTime)
if err != nil {
return nil, common.ContextError(err)
}
OSLIDs[propagationChannelID] = hex.EncodeToString(fileSpec.ID)
}
return OSLIDs, nil
}
// makeOSLFileSpec creates an OSL file key, splits it according to the
// scheme's key splits, and sets the OSL ID as its first SLOK ID. The
// returned key is used to encrypt the OSL payload and then discarded;
// the key may be reassembled using the data in the KeyShares tree,
// given sufficient SLOKs.
func makeOSLFileSpec(
scheme *Scheme,
propagationChannelID string,
firstSLOKTime time.Time) ([]byte, *OSLFileSpec, error) {
ref := &slokReference{
PropagationChannelID: propagationChannelID,
SeedSpecID: string(scheme.SeedSpecs[0].ID),
Time: firstSLOKTime,
}
firstSLOK := scheme.deriveSLOK(ref)
oslID := firstSLOK.ID
// Note: previously, fileKey was a random key. Now, the key
// is derived from the master key and OSL ID. This deterministic
// derivation ensures that repeated paves of the same OSL
// with the same ID and same content yields the same MD5Sum
// to avoid wasteful downloads.
//
// Similarly, the shareKeys generated in divideKey and the Shamir
// key splitting random polynomials are now both deterministically
// generated from a seeded CSPRNG. This ensures that the OSL
// registry remains identical for repeated paves of the same config
// and parameters.
//
// The split structure is added to the deterministic key
// derivation so that changes to the split configuration will not
// expose the same key material to different SLOK combinations.
splitStructure := make([]byte, 16*(1+len(scheme.SeedPeriodKeySplits)))
i := 0
binary.LittleEndian.PutUint64(splitStructure[i:], uint64(len(scheme.SeedSpecs)))
binary.LittleEndian.PutUint64(splitStructure[i+8:], uint64(scheme.SeedSpecThreshold))
i += 16
for _, keySplit := range scheme.SeedPeriodKeySplits {
binary.LittleEndian.PutUint64(splitStructure[i:], uint64(keySplit.Total))
binary.LittleEndian.PutUint64(splitStructure[i+8:], uint64(keySplit.Threshold))
i += 16
}
fileKey := deriveKeyHKDF(
scheme.MasterKey,
splitStructure,
[]byte("osl-file-key"),
oslID)
splitKeyMaterialSeed := deriveKeyHKDF(
scheme.MasterKey,
splitStructure,
[]byte("osl-file-split-key-material-seed"),
oslID)
keyMaterialReader, err := newSeededKeyMaterialReader(splitKeyMaterialSeed)
if err != nil {
return nil, nil, common.ContextError(err)
}
keyShares, err := divideKey(
scheme,
keyMaterialReader,
fileKey,
scheme.SeedPeriodKeySplits,
propagationChannelID,
&firstSLOKTime)
if err != nil {
return nil, nil, common.ContextError(err)
}
fileSpec := &OSLFileSpec{
ID: oslID,
KeyShares: keyShares,
}
return fileKey, fileSpec, nil
}
// divideKey recursively constructs a KeyShares tree.
func divideKey(
scheme *Scheme,
keyMaterialReader io.Reader,
key []byte,
keySplits []KeySplit,
propagationChannelID string,
nextSLOKTime *time.Time) (*KeyShares, error) {
keySplitIndex := len(keySplits) - 1
keySplit := keySplits[keySplitIndex]
shares, err := shamirSplit(
key,
keySplit.Total,
keySplit.Threshold,
keyMaterialReader)
if err != nil {
return nil, common.ContextError(err)
}
var boxedShares [][]byte
var keyShares []*KeyShares
for _, share := range shares {
var shareKey [KEY_LENGTH_BYTES]byte
n, err := keyMaterialReader.Read(shareKey[:])
if err == nil && n != len(shareKey) {
err = errors.New("unexpected length")
}
if err != nil {
return nil, common.ContextError(err)
}
if keySplitIndex > 0 {
keyShare, err := divideKey(
scheme,
keyMaterialReader,
shareKey[:],
keySplits[0:keySplitIndex],
propagationChannelID,
nextSLOKTime)
if err != nil {
return nil, common.ContextError(err)
}
keyShares = append(keyShares, keyShare)
} else {
keyShare, err := divideKeyWithSeedSpecSLOKs(
scheme,
keyMaterialReader,
shareKey[:],
propagationChannelID,
nextSLOKTime)
if err != nil {
return nil, common.ContextError(err)
}
keyShares = append(keyShares, keyShare)
*nextSLOKTime = nextSLOKTime.Add(time.Duration(scheme.SeedPeriodNanoseconds))
}
boxedShare, err := box(shareKey[:], share)
if err != nil {
return nil, common.ContextError(err)
}
boxedShares = append(boxedShares, boxedShare)
}
return &KeyShares{
Threshold: keySplit.Threshold,
BoxedShares: boxedShares,
SLOKIDs: nil,
KeyShares: keyShares,
}, nil
}
func divideKeyWithSeedSpecSLOKs(
scheme *Scheme,
keyMaterialReader io.Reader,
key []byte,
propagationChannelID string,
nextSLOKTime *time.Time) (*KeyShares, error) {
var boxedShares [][]byte
var slokIDs [][]byte
shares, err := shamirSplit(
key,
len(scheme.SeedSpecs),
scheme.SeedSpecThreshold,
keyMaterialReader)
if err != nil {
return nil, common.ContextError(err)
}
for index, seedSpec := range scheme.SeedSpecs {
ref := &slokReference{
PropagationChannelID: propagationChannelID,
SeedSpecID: string(seedSpec.ID),
Time: *nextSLOKTime,
}
slok := scheme.deriveSLOK(ref)
boxedShare, err := box(slok.Key, shares[index])
if err != nil {
return nil, common.ContextError(err)
}
boxedShares = append(boxedShares, boxedShare)
slokIDs = append(slokIDs, slok.ID)
}
return &KeyShares{
Threshold: scheme.SeedSpecThreshold,
BoxedShares: boxedShares,
SLOKIDs: slokIDs,
KeyShares: nil,
}, nil
}
// reassembleKey recursively traverses a KeyShares tree, determining
// whether there exist sufficient SLOKs to reassemble the root key and
// performing the key assembly as required.
func (keyShares *KeyShares) reassembleKey(lookup SLOKLookup, unboxKey bool) (bool, []byte, error) {
if (len(keyShares.SLOKIDs) > 0 && len(keyShares.KeyShares) > 0) ||
(len(keyShares.SLOKIDs) > 0 && len(keyShares.SLOKIDs) != len(keyShares.BoxedShares)) ||
(len(keyShares.KeyShares) > 0 && len(keyShares.KeyShares) != len(keyShares.BoxedShares)) {
return false, nil, common.ContextError(errors.New("unexpected KeyShares format"))
}
shareCount := 0
var shares [][]byte
if unboxKey {
// Note: shamirCombine infers share indices from slice offset, so a slot is
// allocated for every BoxedShare and missing shares are left nil.
shares = make([][]byte, len(keyShares.BoxedShares))
}
if len(keyShares.SLOKIDs) > 0 {
for i := 0; i < len(keyShares.SLOKIDs) && shareCount < keyShares.Threshold; i++ {
slokKey := lookup(keyShares.SLOKIDs[i])
if slokKey == nil {
continue
}
shareCount += 1
if unboxKey {
share, err := unbox(slokKey, keyShares.BoxedShares[i])
if err != nil {
return false, nil, common.ContextError(err)
}
shares[i] = share
}
}
} else {
for i := 0; i < len(keyShares.KeyShares) && shareCount < keyShares.Threshold; i++ {
ok, key, err := keyShares.KeyShares[i].reassembleKey(lookup, unboxKey)
if err != nil {
return false, nil, common.ContextError(err)
}
if !ok {
continue
}
shareCount += 1
if unboxKey {
share, err := unbox(key, keyShares.BoxedShares[i])
if err != nil {
return false, nil, common.ContextError(err)
}
shares[i] = share
}
}
}
if shareCount < keyShares.Threshold {
return false, nil, nil
}
if !unboxKey {
return true, nil, nil
}
joinedKey := shamirCombine(shares)
return true, joinedKey, nil
}
// GetOSLRegistryURL returns the URL for an OSL registry. Clients
// call this when fetching the registry from out-of-band
// distribution sites.
// Clients are responsible for tracking whether the remote file has
// changed or not before downloading.
func GetOSLRegistryURL(baseURL string) string {
u, err := url.Parse(baseURL)
if err != nil {
return ""
}
u.Path = path.Join(u.Path, REGISTRY_FILENAME)
return u.String()
}
// GetOSLRegistryFilename returns an appropriate filename for
// the resumable download destination for the OSL registry.
func GetOSLRegistryFilename(baseDirectory string) string {
return filepath.Join(baseDirectory, REGISTRY_FILENAME)
}
// GetOSLFileURL returns the URL for an OSL file. Once the client
// has determined, from GetSeededOSLIDs, which OSLs it has sufficiently
// seeded, it calls this to fetch the OSLs for download and decryption.
// Clients are responsible for tracking whether the remote file has
// changed or not before downloading.
func GetOSLFileURL(baseURL string, oslID []byte) string {
u, err := url.Parse(baseURL)
if err != nil {
return ""
}
u.Path = path.Join(
u.Path, fmt.Sprintf(OSL_FILENAME_FORMAT, hex.EncodeToString(oslID)))
return u.String()
}
// GetOSLFilename returns an appropriate filename for the resumable
// download destination for the OSL file.
func GetOSLFilename(baseDirectory string, oslID []byte) string {
return filepath.Join(
baseDirectory, fmt.Sprintf(OSL_FILENAME_FORMAT, hex.EncodeToString(oslID)))
}
// SLOKLookup is a callback to lookup SLOK keys by ID.
type SLOKLookup func([]byte) []byte
// RegistryStreamer authenticates and processes a JSON encoded OSL registry.
// The streamer processes the registry without loading the entire file
// into memory, parsing each OSL file spec in turn and returning those
// OSL file specs for which the client has sufficient SLOKs to reassemble
// the OSL key and decrypt.
//
// At this stage, SLOK reassembly simply does SLOK ID lookups and threshold
// counting and does not derive keys for every OSL. This allows the client
// to defer key derivation until NewOSLReader for cases where it has not
// already imported the OSL.
//
// The client's propagation channel ID is used implicitly: it determines the
// base URL used to download the registry and OSL files. If the client has
// seeded SLOKs from a propagation channel ID different than the one associated
// with its present base URL, they will not appear in the registry and not
// be used.
type RegistryStreamer struct {
jsonDecoder *json.Decoder
lookup SLOKLookup
}
// NewRegistryStreamer creates a new RegistryStreamer.
func NewRegistryStreamer(
registryFileContent io.ReadSeeker,
signingPublicKey string,
lookup SLOKLookup) (*RegistryStreamer, error) {
payloadReader, err := common.NewAuthenticatedDataPackageReader(
registryFileContent, signingPublicKey)
if err != nil {
return nil, common.ContextError(err)
}
base64Decoder := base64.NewDecoder(base64.StdEncoding, payloadReader)
// A json.Decoder is used to stream the JSON payload, which
// is expected to be of the following form, corresponding
// to the Registry struct type:
//
// {"FileSpecs" : [{...}, {...}, ..., {...}]}
jsonDecoder := json.NewDecoder(base64Decoder)
err = expectJSONDelimiter(jsonDecoder, "{")
if err != nil {
return nil, common.ContextError(err)
}
token, err := jsonDecoder.Token()
if err != nil {
return nil, common.ContextError(err)
}
name, ok := token.(string)
if !ok {
return nil, common.ContextError(
fmt.Errorf("unexpected token type: %T", token))
}
if name != "FileSpecs" {
return nil, common.ContextError(
fmt.Errorf("unexpected field name: %s", name))
}
err = expectJSONDelimiter(jsonDecoder, "[")
if err != nil {
return nil, common.ContextError(err)
}
return &RegistryStreamer{
jsonDecoder: jsonDecoder,
lookup: lookup,
}, nil
}
// Next returns the next OSL file spec that the client
// has sufficient SLOKs to decrypt. The client calls
// NewOSLReader with the file spec to process that OSL.
// Next returns nil at EOF.
func (s *RegistryStreamer) Next() (*OSLFileSpec, error) {
for {
if s.jsonDecoder.More() {
var fileSpec OSLFileSpec
err := s.jsonDecoder.Decode(&fileSpec)
if err != nil {
return nil, common.ContextError(err)
}
ok, _, err := fileSpec.KeyShares.reassembleKey(s.lookup, false)
if err != nil {
return nil, common.ContextError(err)
}
if ok {
return &fileSpec, nil
}
} else {
// Expect the end of the FileSpecs array.
err := expectJSONDelimiter(s.jsonDecoder, "]")
if err != nil {
return nil, common.ContextError(err)
}
// Expect the end of the Registry object.
err = expectJSONDelimiter(s.jsonDecoder, "}")
if err != nil {
return nil, common.ContextError(err)
}
// Expect the end of the registry content.
_, err = s.jsonDecoder.Token()
if err != io.EOF {
return nil, common.ContextError(err)
}
return nil, nil
}
}
}
func expectJSONDelimiter(jsonDecoder *json.Decoder, delimiter string) error {
token, err := jsonDecoder.Token()
if err != nil {
return common.ContextError(err)
}
delim, ok := token.(json.Delim)
if !ok {
return common.ContextError(
fmt.Errorf("unexpected token type: %T", token))
}
if delim.String() != delimiter {
return common.ContextError(
fmt.Errorf("unexpected delimiter: %s", delim.String()))
}
return nil
}
// NewOSLReader decrypts, authenticates and streams an OSL payload.
func NewOSLReader(
oslFileContent io.ReadSeeker,
fileSpec *OSLFileSpec,
lookup SLOKLookup,
signingPublicKey string) (io.Reader, error) {
ok, fileKey, err := fileSpec.KeyShares.reassembleKey(lookup, true)
if err != nil {
return nil, common.ContextError(err)
}
if !ok {
return nil, common.ContextError(errors.New("unseeded OSL"))
}
if len(fileKey) != KEY_LENGTH_BYTES {
return nil, common.ContextError(errors.New("invalid key length"))
}
var nonce [24]byte
var key [KEY_LENGTH_BYTES]byte
copy(key[:], fileKey)
unboxer, err := secretbox.NewOpenReadSeeker(oslFileContent, &nonce, &key)
if err != nil {
return nil, common.ContextError(err)
}
return common.NewAuthenticatedDataPackageReader(
unboxer,
signingPublicKey)
}
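// exampleImportOSLs is an illustrative sketch, not part of the original API,
// of the client-side flow: stream the downloaded registry and, for each OSL
// file spec the client holds sufficient SLOKs for, open and import the
// corresponding downloaded OSL file. openOSLFile and importServerEntries are
// hypothetical callbacks supplied by the client.
func exampleImportOSLs(
    registryFileContent io.ReadSeeker,
    signingPublicKey string,
    lookup SLOKLookup,
    openOSLFile func(oslID []byte) (io.ReadSeeker, error),
    importServerEntries func(io.Reader) error) error {

    streamer, err := NewRegistryStreamer(
        registryFileContent, signingPublicKey, lookup)
    if err != nil {
        return err
    }
    for {
        fileSpec, err := streamer.Next()
        if err != nil {
            return err
        }
        if fileSpec == nil {
            // EOF: no more reassemblable OSL file specs.
            return nil
        }
        oslFileContent, err := openOSLFile(fileSpec.ID)
        if err != nil {
            return err
        }
        oslReader, err := NewOSLReader(
            oslFileContent, fileSpec, lookup, signingPublicKey)
        if err != nil {
            return err
        }
        if err := importServerEntries(oslReader); err != nil {
            return err
        }
    }
}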
// zeroReader reads an unlimited stream of zeroes.
type zeroReader struct {
}
func (z *zeroReader) Read(p []byte) (int, error) {
for i := 0; i < len(p); i++ {
p[i] = 0
}
return len(p), nil
}
// newSeededKeyMaterialReader constructs a CSPRNG using AES-CTR.
// The seed is the AES key and the IV is fixed and constant.
// Using the same seed will always produce the same output stream.
// The data stream is intended to be used to deterministically
// generate key material and is not intended as a general
// purpose CSPRNG.
func newSeededKeyMaterialReader(seed []byte) (io.Reader, error) {
if len(seed) != KEY_LENGTH_BYTES {
return nil, common.ContextError(errors.New("invalid key length"))
}
aesCipher, err := aes.NewCipher(seed)
if err != nil {
return nil, common.ContextError(err)
}
var iv [aes.BlockSize]byte
return &cipher.StreamReader{
S: cipher.NewCTR(aesCipher, iv[:]),
R: new(zeroReader),
}, nil
}
// deriveKeyHKDF implements HKDF-Expand as defined in https://tools.ietf.org/html/rfc5869
// where masterKey = PRK, context = info, and L = 32; SHA-256 is used so HashLen = 32
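// Since L equals HashLen, HKDF-Expand reduces to a single block:
// output = T(1) = HMAC-SHA-256(masterKey, context || 0x01).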
func deriveKeyHKDF(masterKey []byte, context ...[]byte) []byte {
// TODO: use golang.org/x/crypto/hkdf?
mac := hmac.New(sha256.New, masterKey)
for _, item := range context {
mac.Write([]byte(item))
}
mac.Write([]byte{byte(0x01)})
return mac.Sum(nil)
}
// isValidShamirSplit checks sss.Split constraints
func isValidShamirSplit(total, threshold int) bool {
if total < 1 || total > 254 || threshold < 1 || threshold > total {
return false
}
return true
}
// shamirSplit is a helper wrapper for sss.Split
func shamirSplit(
secret []byte,
total, threshold int,
randReader io.Reader) ([][]byte, error) {
if !isValidShamirSplit(total, threshold) {
return nil, common.ContextError(errors.New("invalid parameters"))
}
if threshold == 1 {
// Special case: each share is simply the secret
shares := make([][]byte, total)
for i := 0; i < total; i++ {
shares[i] = secret
}
return shares, nil
}
shareMap, err := sss.SplitUsingReader(
byte(total), byte(threshold), secret, randReader)
if err != nil {
return nil, common.ContextError(err)
}
shares := make([][]byte, total)
for i := 0; i < total; i++ {
// Note: sss.Combine index starts at 1
shares[i] = shareMap[byte(i)+1]
}
return shares, nil
}
// shamirCombine is a helper wrapper for sss.Combine
func shamirCombine(shares [][]byte) []byte {
if len(shares) == 1 {
// Special case: each share is simply the secret
return shares[0]
}
// Convert a sparse list into a map
shareMap := make(map[byte][]byte)
for index, share := range shares {
if share != nil {
// Note: sss.Combine index starts at 1
shareMap[byte(index)+1] = share
}
}
return sss.Combine(shareMap)
}
// box is a helper wrapper for secretbox.Seal.
// A constant nonce is used, which is secure so long as
// each key is used to encrypt only one message.
func box(key, plaintext []byte) ([]byte, error) {
if len(key) != KEY_LENGTH_BYTES {
return nil, common.ContextError(errors.New("invalid key length"))
}
var nonce [24]byte
var secretboxKey [KEY_LENGTH_BYTES]byte
copy(secretboxKey[:], key)
box := secretbox.Seal(nil, plaintext, &nonce, &secretboxKey)
return box, nil
}
// unbox is a helper wrapper for secretbox.Open
func unbox(key, box []byte) ([]byte, error) {
if len(key) != KEY_LENGTH_BYTES {
return nil, common.ContextError(errors.New("invalid key length"))
}
var nonce [24]byte
var secretboxKey [KEY_LENGTH_BYTES]byte
copy(secretboxKey[:], key)
plaintext, ok := secretbox.Open(nil, box, &nonce, &secretboxKey)
if !ok {
return nil, common.ContextError(errors.New("unbox failed"))
}
return plaintext, nil
}