/*
* Copyright (c) 2015, Psiphon Inc.
* All rights reserved.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
// Package psiphon implements the core tunnel functionality of a Psiphon client.
// The main function is RunForever, which runs a Controller that obtains lists of
// servers, establishes tunnel connections, and runs local proxies through which
// tunneled traffic may be sent.
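//
// A minimal usage sketch (hedged: LoadConfig, Config.Commit,
// OpenDataStore, and CloseDataStore are assumed package entry points,
// as in typical versions of this codebase; NewController and
// Controller.Run are defined in this file; error handling elided):
//
//	config, _ := LoadConfig(configJSON)
//	_ = config.Commit()
//	_ = OpenDataStore(config)
//	defer CloseDataStore()
//	controller, _ := NewController(config)
//	controller.Run(context.Background())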
package psiphon
import (
"context"
"errors"
"fmt"
"math/rand"
"net"
"sync"
"time"
"github.com/Psiphon-Labs/goarista/monotime"
"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/parameters"
"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng"
"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol"
"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/tactics"
"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/tun"
)
// Controller is a tunnel lifecycle coordinator. It manages lists of servers to
// connect to; establishes and monitors tunnels; and runs local proxies which
// route traffic through the tunnels.
type Controller struct {
config *Config
runCtx context.Context
stopRunning context.CancelFunc
runWaitGroup *sync.WaitGroup
connectedTunnels chan *Tunnel
failedTunnels chan *Tunnel
tunnelMutex sync.Mutex
establishedOnce bool
tunnels []*Tunnel
nextTunnel int
startedConnectedReporter bool
isEstablishing bool
protocolSelectionConstraints *protocolSelectionConstraints
concurrentEstablishTunnelsMutex sync.Mutex
establishConnectTunnelCount int
concurrentEstablishTunnels int
concurrentIntensiveEstablishTunnels int
peakConcurrentEstablishTunnels int
peakConcurrentIntensiveEstablishTunnels int
establishCtx context.Context
stopEstablish context.CancelFunc
establishWaitGroup *sync.WaitGroup
candidateServerEntries chan *candidateServerEntry
untunneledDialConfig *DialConfig
splitTunnelClassifier *SplitTunnelClassifier
signalFetchCommonRemoteServerList chan struct{}
signalFetchObfuscatedServerLists chan struct{}
signalDownloadUpgrade chan string
signalReportConnected chan struct{}
serverAffinityDoneBroadcast chan struct{}
packetTunnelClient *tun.Client
packetTunnelTransport *PacketTunnelTransport
staggerMutex sync.Mutex
}
// NewController initializes a new controller.
func NewController(config *Config) (controller *Controller, err error) {
if !config.IsCommitted() {
return nil, common.ContextError(errors.New("uncommitted config"))
}
// Needed by regen, at least
rand.Seed(int64(time.Now().Nanosecond()))
// The session ID for the Psiphon server API is used across all
// tunnels established by the controller.
NoticeSessionId(config.SessionID)
untunneledDialConfig := &DialConfig{
UpstreamProxyURL: config.UpstreamProxyURL,
CustomHeaders: config.CustomHeaders,
DeviceBinder: config.deviceBinder,
DnsServerGetter: config.DnsServerGetter,
IPv6Synthesizer: config.IPv6Synthesizer,
TrustedCACertificatesFilename: config.TrustedCACertificatesFilename,
}
controller = &Controller{
config: config,
runWaitGroup: new(sync.WaitGroup),
// connectedTunnels and failedTunnels buffer sizes are large enough to
// receive full pools of tunnels without blocking. Senders should not block.
connectedTunnels: make(chan *Tunnel, config.TunnelPoolSize),
failedTunnels: make(chan *Tunnel, config.TunnelPoolSize),
tunnels: make([]*Tunnel, 0),
establishedOnce: false,
startedConnectedReporter: false,
isEstablishing: false,
untunneledDialConfig: untunneledDialConfig,
// TODO: Add a buffer of 1 so we don't miss a signal while receiver is
// starting? Trade-off is potential back-to-back fetch remotes. As-is,
// establish will eventually signal another fetch remote.
signalFetchCommonRemoteServerList: make(chan struct{}),
signalFetchObfuscatedServerLists: make(chan struct{}),
signalDownloadUpgrade: make(chan string),
signalReportConnected: make(chan struct{}),
}
controller.splitTunnelClassifier = NewSplitTunnelClassifier(config, controller)
if config.PacketTunnelTunFileDescriptor > 0 {
// Run a packet tunnel client. The lifetime of the tun.Client is the
// lifetime of the Controller, so it exists across tunnel establishments
// and reestablishments. The PacketTunnelTransport provides a layer
// that presents a continuously existing transport to the tun.Client;
// it's set to use new SSH channels after each new SSH tunnel is established.
packetTunnelTransport := NewPacketTunnelTransport()
packetTunnelClient, err := tun.NewClient(&tun.ClientConfig{
Logger: NoticeCommonLogger(),
TunFileDescriptor: config.PacketTunnelTunFileDescriptor,
Transport: packetTunnelTransport,
})
if err != nil {
return nil, common.ContextError(err)
}
controller.packetTunnelClient = packetTunnelClient
controller.packetTunnelTransport = packetTunnelTransport
}
return controller, nil
}
// Run executes the controller. Run exits if a controller
// component fails or the parent context is canceled.
func (controller *Controller) Run(ctx context.Context) {
pprofRun()
// Ensure fresh repetitive notice state for each run, so the
// client will always get an AvailableEgressRegions notice,
// an initial instance of any repetitive error notice, etc.
ResetRepetitiveNotices()
runCtx, stopRunning := context.WithCancel(ctx)
defer stopRunning()
controller.runCtx = runCtx
controller.stopRunning = stopRunning
// Start components
// TODO: IPv6 support
var listenIP string
if controller.config.ListenInterface == "" {
listenIP = "127.0.0.1"
} else if controller.config.ListenInterface == "any" {
listenIP = "0.0.0.0"
} else {
IPv4Address, _, err := common.GetInterfaceIPAddresses(controller.config.ListenInterface)
if err == nil && IPv4Address == nil {
err = fmt.Errorf("no IPv4 address for interface %s", controller.config.ListenInterface)
}
if err != nil {
NoticeError("error getting listener IP: %s", err)
return
}
listenIP = IPv4Address.String()
}
if !controller.config.DisableLocalSocksProxy {
socksProxy, err := NewSocksProxy(controller.config, controller, listenIP)
if err != nil {
NoticeAlert("error initializing local SOCKS proxy: %s", err)
return
}
defer socksProxy.Close()
}
if !controller.config.DisableLocalHTTPProxy {
httpProxy, err := NewHttpProxy(controller.config, controller, listenIP)
if err != nil {
NoticeAlert("error initializing local HTTP proxy: %s", err)
return
}
defer httpProxy.Close()
}
if !controller.config.DisableRemoteServerListFetcher {
if controller.config.RemoteServerListURLs != nil {
controller.runWaitGroup.Add(1)
go controller.remoteServerListFetcher(
"common",
FetchCommonRemoteServerList,
controller.signalFetchCommonRemoteServerList)
}
if controller.config.ObfuscatedServerListRootURLs != nil {
controller.runWaitGroup.Add(1)
go controller.remoteServerListFetcher(
"obfuscated",
FetchObfuscatedServerLists,
controller.signalFetchObfuscatedServerLists)
}
}
if controller.config.UpgradeDownloadURLs != nil {
controller.runWaitGroup.Add(1)
go controller.upgradeDownloader()
}
// Note: the connected reporter isn't started until a tunnel is
// established
controller.runWaitGroup.Add(1)
go controller.runTunnels()
controller.runWaitGroup.Add(1)
go controller.establishTunnelWatcher()
if controller.packetTunnelClient != nil {
controller.packetTunnelClient.Start()
}
// Wait while running
<-controller.runCtx.Done()
NoticeInfo("controller stopped")
if controller.packetTunnelClient != nil {
controller.packetTunnelClient.Stop()
}
// All workers -- runTunnels, establishment workers, and auxiliary
// workers such as the fetch remote server list and untunneled upgrade
// download -- operate with the controller run context and will all
// be interrupted when the run context is done.
controller.runWaitGroup.Wait()
controller.splitTunnelClassifier.Shutdown()
NoticeInfo("exiting controller")
NoticeExiting()
}
// SignalComponentFailure notifies the controller that an associated component has failed.
// This will terminate the controller.
func (controller *Controller) SignalComponentFailure() {
NoticeAlert("controller shutdown due to component failure")
controller.stopRunning()
}
// SetDynamicConfig overrides the sponsor ID and authorizations fields of the
// Controller config with the input values. The new values will be used in the
// next tunnel connection.
func (controller *Controller) SetDynamicConfig(sponsorID string, authorizations []string) {
controller.config.SetDynamicConfig(sponsorID, authorizations)
}
// TerminateNextActiveTunnel terminates the active tunnel, which will initiate
// establishment of a new tunnel.
func (controller *Controller) TerminateNextActiveTunnel() {
tunnel := controller.getNextActiveTunnel()
if tunnel != nil {
controller.SignalTunnelFailure(tunnel)
NoticeInfo("terminated tunnel: %s", tunnel.dialParams.ServerEntry.IpAddress)
}
}
// remoteServerListFetcher fetches an out-of-band list of server entries
// for more tunnel candidates. It fetches when signaled, with retries
// on failure.
func (controller *Controller) remoteServerListFetcher(
name string,
fetcher RemoteServerListFetcher,
signal <-chan struct{}) {
defer controller.runWaitGroup.Done()
var lastFetchTime monotime.Time
fetcherLoop:
for {
// Wait for a signal before fetching
select {
case <-signal:
case <-controller.runCtx.Done():
break fetcherLoop
}
// Skip fetch entirely (i.e., send no request at all, even when ETag would save
// on response size) when a recent fetch was successful
stalePeriod := controller.config.clientParameters.Get().Duration(
parameters.FetchRemoteServerListStalePeriod)
if lastFetchTime != 0 &&
lastFetchTime.Add(stalePeriod).After(monotime.Now()) {
continue
}
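// For example (hypothetical value): with a FetchRemoteServerListStalePeriod
// of 6 hours, a signal arriving 2 hours after a successful fetch is
// consumed here without sending any request.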
retryLoop:
for attempt := 0; ; attempt++ {
// Don't attempt to fetch while there is no network connectivity,
// to avoid alert notice noise.
if !WaitForNetworkConnectivity(
controller.runCtx,
controller.config.NetworkConnectivityChecker) {
break fetcherLoop
}
// Pick any active tunnel and make the next fetch attempt. If there's
// no active tunnel, the untunneledDialConfig will be used.
tunnel := controller.getNextActiveTunnel()
err := fetcher(
controller.runCtx,
controller.config,
attempt,
tunnel,
controller.untunneledDialConfig)
if err == nil {
lastFetchTime = monotime.Now()
break retryLoop
}
NoticeAlert("failed to fetch %s remote server list: %s", name, err)
retryPeriod := controller.config.clientParameters.Get().Duration(
parameters.FetchRemoteServerListRetryPeriod)
timer := time.NewTimer(retryPeriod)
select {
case <-timer.C:
case <-controller.runCtx.Done():
timer.Stop()
break fetcherLoop
}
}
}
NoticeInfo("exiting %s remote server list fetcher", name)
}
// establishTunnelWatcher terminates the controller if a tunnel
// has not been established in the configured time period. This
// is regardless of how many tunnels are presently active -- meaning
// that if an active tunnel was established and then lost, the controller
// is left running (to re-establish).
func (controller *Controller) establishTunnelWatcher() {
defer controller.runWaitGroup.Done()
timeout := controller.config.clientParameters.Get().Duration(
parameters.EstablishTunnelTimeout)
if timeout > 0 {
timer := time.NewTimer(timeout)
defer timer.Stop()
select {
case <-timer.C:
if !controller.hasEstablishedOnce() {
NoticeEstablishTunnelTimeout(timeout)
controller.SignalComponentFailure()
}
case <-controller.runCtx.Done():
}
}
NoticeInfo("exiting establish tunnel watcher")
}
// connectedReporter sends periodic "connected" requests to the Psiphon API.
// These requests are for server-side unique user stats calculation. See the
// comment in DoConnectedRequest for a description of the request mechanism.
// To ensure we don't over- or under-count unique users, only one connected
// request is made across all simultaneous multi-tunnels; and the connected
// request is repeated periodically for very long-lived tunnels.
// The signalReportConnected mechanism is used to trigger another connected
// request immediately after a reconnect.
func (controller *Controller) connectedReporter() {
defer controller.runWaitGroup.Done()
loop:
for {
// Pick any active tunnel and make the next connected request. No error
// is logged if there's no active tunnel, as that's not an unexpected condition.
reported := false
tunnel := controller.getNextActiveTunnel()
if tunnel != nil {
err := tunnel.serverContext.DoConnectedRequest()
if err == nil {
reported = true
} else {
NoticeAlert("failed to make connected request: %s", err)
}
}
// Schedule the next connected request and wait.
// Note: this duration is not a dynamic ClientParameter as
// the daily unique user stats logic specifically requires
// a "connected" request no more or less often than every
// 24 hours.
var duration time.Duration
if reported {
duration = 24 * time.Hour
} else {
duration = controller.config.clientParameters.Get().Duration(
parameters.PsiphonAPIConnectedRequestRetryPeriod)
}
timer := time.NewTimer(duration)
doBreak := false
select {
case <-controller.signalReportConnected:
case <-timer.C:
// Make another connected request
case <-controller.runCtx.Done():
doBreak = true
}
timer.Stop()
if doBreak {
break loop
}
}
NoticeInfo("exiting connected reporter")
}
func (controller *Controller) startOrSignalConnectedReporter() {
// session is nil when DisableApi is set
if controller.config.DisableApi {
return
}
// Start the connected reporter after the first tunnel is established.
// Concurrency note: only the runTunnels goroutine may access startedConnectedReporter.
if !controller.startedConnectedReporter {
controller.startedConnectedReporter = true
controller.runWaitGroup.Add(1)
go controller.connectedReporter()
} else {
select {
case controller.signalReportConnected <- *new(struct{}):
default:
}
}
}
// upgradeDownloader makes periodic attempts to complete a client upgrade
// download. DownloadUpgrade() is resumable, so each attempt has potential for
// getting closer to completion, even in conditions where the download or
// tunnel is repeatedly interrupted.
// An upgrade download is triggered by either a handshake response indicating
// that a new version is available; or after failing to connect, in which case
// it's useful to check, out-of-band, for an upgrade with new circumvention
// capabilities.
// Once the download operation completes successfully, the downloader exits
// and is not run again: either there is not a newer version, or the upgrade
// has been downloaded and is ready to be applied.
// We're assuming that the upgrade will be applied and the entire system
// restarted before another upgrade is to be downloaded.
//
// TODO: refactor upgrade downloader and remote server list fetcher to use
// common code (including the resumable download routines).
//
func (controller *Controller) upgradeDownloader() {
defer controller.runWaitGroup.Done()
var lastDownloadTime monotime.Time
downloadLoop:
for {
// Wait for a signal before downloading
var handshakeVersion string
select {
case handshakeVersion = <-controller.signalDownloadUpgrade:
case <-controller.runCtx.Done():
break downloadLoop
}
stalePeriod := controller.config.clientParameters.Get().Duration(
parameters.FetchUpgradeStalePeriod)
// Unless the handshake is explicitly advertising a new version, skip
// checking entirely when a recent download was successful.
if handshakeVersion == "" &&
lastDownloadTime != 0 &&
lastDownloadTime.Add(stalePeriod).After(monotime.Now()) {
continue
}
retryLoop:
for attempt := 0; ; attempt++ {
// Don't attempt to download while there is no network connectivity,
// to avoid alert notice noise.
if !WaitForNetworkConnectivity(
controller.runCtx,
controller.config.NetworkConnectivityChecker) {
break downloadLoop
}
// Pick any active tunnel and make the next download attempt. If there's
// no active tunnel, the untunneledDialConfig will be used.
tunnel := controller.getNextActiveTunnel()
err := DownloadUpgrade(
controller.runCtx,
controller.config,
attempt,
handshakeVersion,
tunnel,
controller.untunneledDialConfig)
if err == nil {
lastDownloadTime = monotime.Now()
break retryLoop
}
NoticeAlert("failed to download upgrade: %s", err)
timeout := controller.config.clientParameters.Get().Duration(
parameters.FetchUpgradeRetryPeriod)
timer := time.NewTimer(timeout)
select {
case <-timer.C:
case <-controller.runCtx.Done():
timer.Stop()
break downloadLoop
}
}
}
NoticeInfo("exiting upgrade downloader")
}
// runTunnels is the controller tunnel management main loop. It starts and stops
// establishing tunnels based on the target tunnel pool size and the current size
// of the pool. Tunnels are established asynchronously using worker goroutines.
//
// When there are no server entries for the target region/protocol, the
// establishCandidateGenerator will yield no candidates and wait before
// trying again. In the meantime, a remote server entry fetch may supply
// valid candidates.
//
// When a tunnel is established, it's added to the active pool. The tunnel's
// operateTunnel goroutine monitors the tunnel.
//
// When a tunnel fails, it's removed from the pool and the establish process is
// restarted to fill the pool.
func (controller *Controller) runTunnels() {
defer controller.runWaitGroup.Done()
// Start running
controller.startEstablishing()
loop:
for {
select {
case failedTunnel := <-controller.failedTunnels:
NoticeAlert("tunnel failed: %s", failedTunnel.dialParams.ServerEntry.IpAddress)
controller.terminateTunnel(failedTunnel)
// Clear the reference to this tunnel before calling startEstablishing,
// which will invoke a garbage collection.
failedTunnel = nil
// Concurrency note: only this goroutine may call startEstablishing/stopEstablishing,
// which reference controller.isEstablishing.
controller.startEstablishing()
case connectedTunnel := <-controller.connectedTunnels:
// Tunnel establishment has two phases: connection and activation.
//
// Connection is run concurrently by the establishTunnelWorkers, to minimize
// delay when it's not yet known which server and protocol will be available
// and unblocked.
//
// Activation is run serially, here, to minimize the overhead of making a
// handshake request and starting the operateTunnel management worker for a
// tunnel which may be discarded.
//
// When the connecting tunnel would fill the tunnel pool, establishment is
// stopped before activation. This interrupts all connecting tunnels and
// garbage collects their memory. The purpose is to minimize memory
// pressure when the handshake request is made. In the unlikely case that the
// handshake fails, establishment is restarted.
//
// Any delays in stopEstablishing will delay the handshake for the last
// active tunnel.
//
// In the typical case of TunnelPoolSize of 1, only a single handshake is
// performed and the homepages notices file, when used, will not be modified
// after the NoticeTunnels(1) [i.e., connected] until NoticeTunnels(0) [i.e.,
// disconnected]. For TunnelPoolSize > 1, serial handshakes ensure only that
// each set of emitted NoticeHomepages is contiguous.
active, outstanding := controller.numTunnels()
// discardTunnel will be true here when already fully established.
discardTunnel := (outstanding <= 0)
isFirstTunnel := (active == 0)
isLastTunnel := (outstanding == 1)
if !discardTunnel {
if isLastTunnel {
controller.stopEstablishing()
}
err := connectedTunnel.Activate(controller.runCtx, controller)
if err != nil {
NoticeAlert("failed to activate %s: %s",
connectedTunnel.dialParams.ServerEntry.IpAddress, err)
discardTunnel = true
} else {
// It's unlikely that registerTunnel will fail, since only this goroutine
// calls registerTunnel -- and after checking numTunnels; so failure is not
// expected.
if !controller.registerTunnel(connectedTunnel) {
NoticeAlert("failed to register %s: %s",
connectedTunnel.dialParams.ServerEntry.IpAddress, err)
discardTunnel = true
}
}
// May need to replace this tunnel
if isLastTunnel && discardTunnel {
controller.startEstablishing()
}
}
if discardTunnel {
controller.discardTunnel(connectedTunnel)
// Clear the reference to this discarded tunnel and immediately run
// a garbage collection to reclaim its memory.
connectedTunnel = nil
DoGarbageCollection()
// Skip the rest of this case
break
}
NoticeActiveTunnel(
connectedTunnel.dialParams.ServerEntry.IpAddress,
connectedTunnel.dialParams.TunnelProtocol,
connectedTunnel.dialParams.ServerEntry.SupportsSSHAPIRequests())
if isFirstTunnel {
// The split tunnel classifier is started once the first tunnel is
// established. This first tunnel is passed in to be used to make
// the routes data request.
// A long-running controller may run while the host device is present
// in different regions. In this case, we want the split tunnel logic
// to switch to routes for new regions and not classify traffic based
// on routes installed for older regions.
// We assume that when regions change, the host network will also
// change, and so all tunnels will fail and be re-established. Under
// that assumption, the classifier will be re-Start()-ed here when
// the region has changed.
controller.splitTunnelClassifier.Start(connectedTunnel)
// Signal a connected request on each 1st tunnel establishment. For
// multi-tunnels, the session is connected as long as at least one
// tunnel is established.
controller.startOrSignalConnectedReporter()
// If the handshake indicated that a new client version is available,
// trigger an upgrade download.
// Note: serverContext is nil when DisableApi is set
if connectedTunnel.serverContext != nil &&
connectedTunnel.serverContext.clientUpgradeVersion != "" {
handshakeVersion := connectedTunnel.serverContext.clientUpgradeVersion
select {
case controller.signalDownloadUpgrade <- handshakeVersion:
default:
}
}
}
// Set the new tunnel as the transport for the packet tunnel. The packet tunnel
// client remains up when reestablishing, but no packets are relayed while there
// is no connected tunnel. UseTunnel will establish a new packet tunnel SSH
// channel over the new SSH tunnel and configure the packet tunnel client to use
// the new SSH channel as its transport.
//
// Note: as is, this logic is suboptimal for TunnelPoolSize > 1, as this would
// continuously initialize new packet tunnel sessions for each established
// server. For now, config validation requires TunnelPoolSize == 1 when
// the packet tunnel is used.
if controller.packetTunnelTransport != nil {
controller.packetTunnelTransport.UseTunnel(connectedTunnel)
}
// TODO: design issue -- might not be enough server entries with region/caps to ever fill tunnel slots;
// possible solution is establish target MIN(CountServerEntries(region, protocol), TunnelPoolSize)
if controller.isFullyEstablished() {
controller.stopEstablishing()
}
case <-controller.runCtx.Done():
break loop
}
}
// Stop running
controller.stopEstablishing()
controller.terminateAllTunnels()
// Drain tunnel channels
close(controller.connectedTunnels)
for tunnel := range controller.connectedTunnels {
controller.discardTunnel(tunnel)
}
close(controller.failedTunnels)
for tunnel := range controller.failedTunnels {
controller.discardTunnel(tunnel)
}
NoticeInfo("exiting run tunnels")
}
// SignalSeededNewSLOK implements the TunnelOwner interface. This function
// is called by Tunnel.operateTunnel when the tunnel has received a new,
// previously unknown SLOK from the server. The Controller triggers an OSL
// fetch, as the new SLOK may be sufficient to access new OSLs.
func (controller *Controller) SignalSeededNewSLOK() {
select {
case controller.signalFetchObfuscatedServerLists <- *new(struct{}):
default:
}
}
// SignalTunnelFailure implements the TunnelOwner interface. This function
// is called by Tunnel.operateTunnel when the tunnel has detected that it
// has failed. The Controller will signal runTunnels to create a new
// tunnel and/or remove the tunnel from the list of active tunnels.
func (controller *Controller) SignalTunnelFailure(tunnel *Tunnel) {
// Don't block. Assumes the receiver has a buffer large enough for
// the typical number of operated tunnels. In case there's no room,
// terminate the tunnel (runTunnels won't get a signal in this case,
// but the tunnel will be removed from the list of active tunnels).
select {
case controller.failedTunnels <- tunnel:
default:
controller.terminateTunnel(tunnel)
}
}
// discardTunnel disposes of a successful connection that is no longer required.
func (controller *Controller) discardTunnel(tunnel *Tunnel) {
NoticeInfo("discard tunnel: %s", tunnel.dialParams.ServerEntry.IpAddress)
// TODO: not calling PromoteServerEntry, since that would rank the
// discarded tunnel before fully active tunnels. Can a discarded tunnel
// be promoted (since it connects), but with lower rank than all active
// tunnels?
tunnel.Close(true)
}
// registerTunnel adds the connected tunnel to the pool of active tunnels
// which are candidates for port forwarding. Returns true if the pool has an
// empty slot and false if the pool is full (caller should discard the tunnel).
func (controller *Controller) registerTunnel(tunnel *Tunnel) bool {
controller.tunnelMutex.Lock()
defer controller.tunnelMutex.Unlock()
if len(controller.tunnels) >= controller.config.TunnelPoolSize {
return false
}
// Perform a final check just in case we've established
// a duplicate connection.
for _, activeTunnel := range controller.tunnels {
if activeTunnel.dialParams.ServerEntry.IpAddress ==
tunnel.dialParams.ServerEntry.IpAddress {
NoticeAlert("duplicate tunnel: %s", tunnel.dialParams.ServerEntry.IpAddress)
return false
}
}
controller.establishedOnce = true
controller.tunnels = append(controller.tunnels, tunnel)
NoticeTunnels(len(controller.tunnels))
// Promote this successful tunnel to first rank so it's one
// of the first candidates next time establish runs.
// Connecting to a TargetServerEntry does not change the
// ranking.
if controller.config.TargetServerEntry == "" {
PromoteServerEntry(controller.config, tunnel.dialParams.ServerEntry.IpAddress)
}
return true
}
// hasEstablishedOnce indicates if at least one active tunnel has
// been established up to this point. This is regardless of how many
// tunnels are presently active.
func (controller *Controller) hasEstablishedOnce() bool {
controller.tunnelMutex.Lock()
defer controller.tunnelMutex.Unlock()
return controller.establishedOnce
}
// isFullyEstablished indicates if the pool of active tunnels is full.
func (controller *Controller) isFullyEstablished() bool {
controller.tunnelMutex.Lock()
defer controller.tunnelMutex.Unlock()
return len(controller.tunnels) >= controller.config.TunnelPoolSize
}
// numTunnels returns the number of active and outstanding tunnels.
// Outstanding is the number of tunnels required to fill the pool of
// active tunnels.
func (controller *Controller) numTunnels() (int, int) {
controller.tunnelMutex.Lock()
defer controller.tunnelMutex.Unlock()
active := len(controller.tunnels)
outstanding := controller.config.TunnelPoolSize - len(controller.tunnels)
return active, outstanding
}
// terminateTunnel removes a tunnel from the pool of active tunnels
// and closes the tunnel. The next-tunnel state used by getNextActiveTunnel
// is adjusted as required.
func (controller *Controller) terminateTunnel(tunnel *Tunnel) {
controller.tunnelMutex.Lock()
defer controller.tunnelMutex.Unlock()
for index, activeTunnel := range controller.tunnels {
if tunnel == activeTunnel {
controller.tunnels = append(
controller.tunnels[:index], controller.tunnels[index+1:]...)
if controller.nextTunnel > index {
controller.nextTunnel--
}
if controller.nextTunnel >= len(controller.tunnels) {
controller.nextTunnel = 0
}
activeTunnel.Close(false)
NoticeTunnels(len(controller.tunnels))
break
}
}
}
// terminateAllTunnels empties the tunnel pool, closing all active tunnels.
// This is used when shutting down the controller.
func (controller *Controller) terminateAllTunnels() {
controller.tunnelMutex.Lock()
defer controller.tunnelMutex.Unlock()
// Close all tunnels in parallel. In an orderly shutdown, each tunnel
// may take a few seconds to send a final status request. We only want
// to wait as long as the single slowest tunnel.
closeWaitGroup := new(sync.WaitGroup)
closeWaitGroup.Add(len(controller.tunnels))
for _, activeTunnel := range controller.tunnels {
tunnel := activeTunnel
go func() {
defer closeWaitGroup.Done()
tunnel.Close(false)
}()
}
closeWaitGroup.Wait()
controller.tunnels = make([]*Tunnel, 0)
controller.nextTunnel = 0
NoticeTunnels(len(controller.tunnels))
}
// getNextActiveTunnel returns the next tunnel from the pool of active
// tunnels. Currently, tunnel selection order is simple round-robin.
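// For example, with three active tunnels and nextTunnel initially 0,
// successive calls return tunnels[0], tunnels[1], tunnels[2],
// tunnels[0], and so on; terminateTunnel adjusts nextTunnel so the
// rotation remains in bounds after removals.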
func (controller *Controller) getNextActiveTunnel() (tunnel *Tunnel) {
controller.tunnelMutex.Lock()
defer controller.tunnelMutex.Unlock()
for i := len(controller.tunnels); i > 0; i-- {
tunnel = controller.tunnels[controller.nextTunnel]
controller.nextTunnel =
(controller.nextTunnel + 1) % len(controller.tunnels)
return tunnel
}
return nil
}
// isActiveTunnelServerEntry is used to check if there's already
// an existing tunnel to a candidate server.
func (controller *Controller) isActiveTunnelServerEntry(
serverEntry *protocol.ServerEntry) bool {
controller.tunnelMutex.Lock()
defer controller.tunnelMutex.Unlock()
for _, activeTunnel := range controller.tunnels {
if activeTunnel.dialParams.ServerEntry.IpAddress == serverEntry.IpAddress {
return true
}
}
return false
}
// Dial selects an active tunnel and establishes a port forward
// connection through the selected tunnel. Failure to connect is considered
// a port forward failure, for the purpose of monitoring tunnel health.
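//
// A hedged usage sketch (call sites are outside this excerpt; the local
// proxies are expected to invoke Dial for each accepted client
// connection, where clientConn is the accepted net.Conn):
//
//	conn, err := controller.Dial("example.com:443", false, clientConn)
//	if err != nil {
//		// failure counts against tunnel health
//	}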
func (controller *Controller) Dial(
remoteAddr string, alwaysTunnel bool, downstreamConn net.Conn) (conn net.Conn, err error) {
tunnel := controller.getNextActiveTunnel()
if tunnel == nil {
return nil, common.ContextError(errors.New("no active tunnels"))
}
// Perform split tunnel classification when feature is enabled, and if the remote
// address is classified as untunneled, dial directly.
if !alwaysTunnel && controller.config.SplitTunnelDNSServer != "" {
host, _, err := net.SplitHostPort(remoteAddr)
if err != nil {
return nil, common.ContextError(err)
}
// Note: a possible optimization, when split tunnel is active and IsUntunneled performs
// a DNS resolution in order to make its classification, is to reuse that IP address in
// the following Dials so they do not need to make their own resolutions. However, the
// way this is currently implemented ensures that, e.g., DNS geo load balancing occurs
// relative to the outbound network.
if controller.splitTunnelClassifier.IsUntunneled(host) {
return controller.DirectDial(remoteAddr)
}
}
tunneledConn, err := tunnel.Dial(remoteAddr, alwaysTunnel, downstreamConn)
if err != nil {
return nil, common.ContextError(err)
}
return tunneledConn, nil
}
// DirectDial dials an untunneled TCP connection within the controller run context.
func (controller *Controller) DirectDial(remoteAddr string) (conn net.Conn, err error) {
return DialTCP(controller.runCtx, remoteAddr, controller.untunneledDialConfig)
}
// triggerFetches signals RSL, OSL, and upgrade download fetchers to begin, if
// not already running. triggerFetches is called when tunnel establishment
// fails to complete within a deadline and in other cases where local
// circumvention capabilities are lacking and we may require new server
// entries or client versions with new capabilities.
func (controller *Controller) triggerFetches() {
// Trigger a common remote server list fetch, since we may have failed
// to connect with all known servers. Don't block sending signal, since
// this signal may have already been sent.
// Don't wait for fetch remote to succeed, since it may fail and
// enter a retry loop and we're better off trying more known servers.
// TODO: synchronize the fetch response, so it can be incorporated
// into the server entry iterator as soon as available.
select {
case controller.signalFetchCommonRemoteServerList <- *new(struct{}):
default:
}
// Trigger an OSL fetch in parallel. Both fetches are run in parallel
// so that if one out of the common RSL and OSL set is large, it doesn't
// entirely block fetching the other.
select {
case controller.signalFetchObfuscatedServerLists <- *new(struct{}):
default:
}
// Trigger an out-of-band upgrade availability check and download.
// Since we may have failed to connect, we may benefit from upgrading
// to a new client version with new circumvention capabilities.
select {
case controller.signalDownloadUpgrade <- "":
default:
}
}
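// protocolSelectionConstraints groups the tactics-derived limits that
// shape protocol selection during establishment. Illustrative example
// (hypothetical values): with initialLimitProtocols = ["QUIC-OSSH"],
// initialLimitProtocolsCandidateCount = 10, and limitProtocols empty,
// the first 10 connection attempts consider only QUIC-OSSH-capable
// candidates, after which any supported protocol may be selected (see
// supportedProtocols).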
type protocolSelectionConstraints struct {
useUpstreamProxy bool
initialLimitProtocols protocol.TunnelProtocols
initialLimitProtocolsCandidateCount int
limitProtocols protocol.TunnelProtocols
replayCandidateCount int
}
func (p *protocolSelectionConstraints) hasInitialProtocols() bool {
return len(p.initialLimitProtocols) > 0 && p.initialLimitProtocolsCandidateCount > 0
}
func (p *protocolSelectionConstraints) isInitialCandidate(
excludeIntensive bool,
serverEntry *protocol.ServerEntry) bool {
return p.hasInitialProtocols() &&
len(serverEntry.GetSupportedProtocols(p.useUpstreamProxy, p.initialLimitProtocols, excludeIntensive)) > 0
}
func (p *protocolSelectionConstraints) isCandidate(
excludeIntensive bool,
serverEntry *protocol.ServerEntry) bool {
return len(p.limitProtocols) == 0 ||
len(serverEntry.GetSupportedProtocols(p.useUpstreamProxy, p.limitProtocols, excludeIntensive)) > 0
}
func (p *protocolSelectionConstraints) canReplay(
connectTunnelCount int,
excludeIntensive bool,
serverEntry *protocol.ServerEntry,
replayProtocol string) bool {
if connectTunnelCount > p.replayCandidateCount {
return false
}
return common.Contains(
p.supportedProtocols(connectTunnelCount, excludeIntensive, serverEntry),
replayProtocol)
}
func (p *protocolSelectionConstraints) supportedProtocols(
connectTunnelCount int,
excludeIntensive bool,
serverEntry *protocol.ServerEntry) []string {
limitProtocols := p.limitProtocols
if len(p.initialLimitProtocols) > 0 && p.initialLimitProtocolsCandidateCount > connectTunnelCount {
limitProtocols = p.initialLimitProtocols
}
return serverEntry.GetSupportedProtocols(
p.useUpstreamProxy,
limitProtocols,
excludeIntensive)
}
func (p *protocolSelectionConstraints) selectProtocol(
connectTunnelCount int,
excludeIntensive bool,
serverEntry *protocol.ServerEntry) (string, bool) {
candidateProtocols := p.supportedProtocols(connectTunnelCount, excludeIntensive, serverEntry)
if len(candidateProtocols) == 0 {
return "", false
}
// Pick at random from the supported protocols. This ensures that we'll
// eventually try all possible protocols. Depending on network
// configuration, it may be the case that some protocol is only available
// through multi-capability servers, and a simpler ranked preference of
// protocols could lead to that protocol never being selected.
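// For example, if three of the allowed protocols are supported by this
// server entry, each is chosen with probability 1/3 on this attempt.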
index := prng.Intn(len(candidateProtocols))
return candidateProtocols[index], true
}
type candidateServerEntry struct {
serverEntry *protocol.ServerEntry
isServerAffinityCandidate bool
adjustedEstablishStartTime monotime.Time
}
// startEstablishing creates a pool of worker goroutines which will
// attempt to establish tunnels to candidate servers. The candidates
// are generated by another goroutine.
func (controller *Controller) startEstablishing() {
if controller.isEstablishing {
return
}
NoticeInfo("start establishing")
controller.concurrentEstablishTunnelsMutex.Lock()
controller.establishConnectTunnelCount = 0
controller.concurrentEstablishTunnels = 0
controller.concurrentIntensiveEstablishTunnels = 0
controller.peakConcurrentEstablishTunnels = 0
controller.peakConcurrentIntensiveEstablishTunnels = 0
controller.concurrentEstablishTunnelsMutex.Unlock()
DoGarbageCollection()
emitMemoryMetrics()
// Note: the establish context cancelFunc, controller.stopEstablish,
// is called in controller.stopEstablishing.
controller.isEstablishing = true
controller.establishCtx, controller.stopEstablish = context.WithCancel(controller.runCtx)
controller.establishWaitGroup = new(sync.WaitGroup)
controller.candidateServerEntries = make(chan *candidateServerEntry)
// The server affinity mechanism attempts to favor the previously
// used server when reconnecting. This is beneficial for user
// applications which expect consistency in user IP address (for
// example, a web site which prompts for additional user
// authentication when the IP address changes).
//
// Only the very first server, as determined by
// datastore.PromoteServerEntry(), is the server affinity candidate.
// Concurrent connections attempts to many servers are launched
// without delay, in case the affinity server connection fails.
// While the affinity server connection is outstanding, when any
// other connection is established, there is a short grace period
// delay before delivering the established tunnel; this allows some
// time for the affinity server connection to succeed first.
// When the affinity server connection fails, any other established
// tunnel is registered without delay.
//
// Note: the establishTunnelWorker that receives the affinity
// candidate is solely responsible for closing
// controller.serverAffinityDoneBroadcast.
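//
// A hedged sketch of the grace-period wait described above (the
// establishTunnelWorker implementation is outside this excerpt;
// affinityGracePeriod stands in for the actual delay parameter):
//
//	if !candidate.isServerAffinityCandidate {
//		timer := time.NewTimer(affinityGracePeriod)
//		select {
//		case <-controller.serverAffinityDoneBroadcast:
//		case <-timer.C:
//		case <-controller.establishCtx.Done():
//		}
//		timer.Stop()
//	}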
controller.serverAffinityDoneBroadcast = make(chan struct{})
controller.establishWaitGroup.Add(1)
go controller.launchEstablishing()
}
func (controller *Controller) launchEstablishing() {
defer controller.establishWaitGroup.Done()
// Before starting the establish tunnel workers, get and apply
// tactics, launching a tactics request if required.
//
// Wait only TacticsWaitPeriod for the tactics request to complete (or
// fail) before proceeding with tunnel establishment, in case the tactics
// request is blocked or takes very long to complete.
//
// An in-flight tactics request uses meek in round tripper mode, which
// uses less resources than meek tunnel relay mode. For this reason, the
// tactics request is not counted in concurrentIntensiveEstablishTunnels.
//
// TODO: HTTP/2 uses significantly more memory, so perhaps
// concurrentIntensiveEstablishTunnels should be counted in that case.
//
// Any in-flight tactics request or pending retry will be
// canceled when establishment is stopped.
if !controller.config.DisableTactics {
timeout := controller.config.clientParameters.Get().Duration(
parameters.TacticsWaitPeriod)
tacticsDone := make(chan struct{})
tacticsWaitPeriod := time.NewTimer(timeout)
defer tacticsWaitPeriod.Stop()
controller.establishWaitGroup.Add(1)
go controller.getTactics(tacticsDone)
select {
case <-tacticsDone:
case <-tacticsWaitPeriod.C:
}
tacticsWaitPeriod.Stop()
if controller.isStopEstablishing() {
// This check isn't strictly required but avoids the
// overhead of launching workers if establishment
// stopped while awaiting a tactics request.
return
}
}
// LimitTunnelProtocols and ConnectionWorkerPoolSize may be set by
// tactics.
// Initial- and LimitTunnelProtocols are set once per establishment, for
// consistent application of related probabilities (applied by
// ClientParametersSnapshot.TunnelProtocols). The
// protocolSelectionConstraints field must be read-only after this
// point, allowing concurrent reads by establishment workers.
p := controller.config.clientParameters.Get()
controller.protocolSelectionConstraints = &protocolSelectionConstraints{
useUpstreamProxy: controller.config.UseUpstreamProxy(),
initialLimitProtocols: p.TunnelProtocols(parameters.InitialLimitTunnelProtocols),
initialLimitProtocolsCandidateCount: p.Int(parameters.InitialLimitTunnelProtocolsCandidateCount),
limitProtocols: p.TunnelProtocols(parameters.LimitTunnelProtocols),
replayCandidateCount: p.Int(parameters.ReplayCandidateCount),
}
workerPoolSize := controller.config.clientParameters.Get().Int(
parameters.ConnectionWorkerPoolSize)
p = nil
// If InitialLimitTunnelProtocols is configured but cannot be satisfied,
// skip the initial phase in this establishment. This avoids spinning,
// unable to connect, in this case. InitialLimitTunnelProtocols is
// intended to prioritize certain protocols, but not strictly select them.
//
// The candidate count check is made with egress region selection unset.
// When an egress region is selected, it's the responsibility of the outer
// client to react to the following ReportAvailableRegions output and
// clear the user's selected region to prevent spinning, unable to
// connect. The initial phase is skipped only when
// InitialLimitTunnelProtocols cannot be satisfied _regardless_ of region
// selection.
//
// We presume that, in practice, most clients will have embedded server
// entries with capabilities for most protocols; and that clients will
// often perform RSL checks. So clients should most often have the
// necessary capabilities to satisfy InitialLimitTunnelProtocols. When
// this check fails, RSL/OSL/upgrade checks are triggered in order to gain
// new capabilities.
//
// LimitTunnelProtocols remains a hard limit, as using prohibited
// protocols may have some bad effect, such as a firewall blocking all
// traffic from a host.
if controller.protocolSelectionConstraints.initialLimitProtocolsCandidateCount > 0 {
egressRegion := "" // no egress region
initialCount, count := CountServerEntriesWithConstraints(
controller.config.UseUpstreamProxy(),
egressRegion,
controller.protocolSelectionConstraints)
if initialCount == 0 {
NoticeCandidateServers(
egressRegion,
controller.protocolSelectionConstraints,
initialCount,
count)
NoticeAlert("skipping initial limit tunnel protocols")
controller.protocolSelectionConstraints.initialLimitProtocolsCandidateCount = 0
// Since we were unable to satisfy the InitialLimitTunnelProtocols
// tactic, trigger RSL, OSL, and upgrade fetches to potentially
// gain new capabilities.
controller.triggerFetches()
}
}
// Report available egress regions. After a fresh install, the outer
// client may not have a list of regions to display; and
// LimitTunnelProtocols may reduce the number of available regions.
//
// When the outer client receives NoticeAvailableEgressRegions and the
// configured EgressRegion is not included in the region list, the outer
// client _should_ stop tunnel-core and prompt the user to change the
// region selection, as there are insufficient servers/capabilities to
// establish a tunnel in the selected region.
//
// This report is delayed until after tactics are likely to be applied;
// this avoids a ReportAvailableRegions reporting too many regions,
// followed shortly by a ReportAvailableRegions reporting fewer regions.
// That sequence could cause issues in the outer client UI.
//
// The reported regions are limited by protocolSelectionConstraints;
// in the case where an initial limit is in place, only regions available
// for the initial limit are reported. The initial phase will not complete
// if EgressRegion is set such that there are no server entries with the
// necessary protocol capabilities (either locally or from a remote server
// list fetch).
ReportAvailableRegions(
controller.config,
controller.protocolSelectionConstraints)
for i := 0; i < workerPoolSize; i++ {
controller.establishWaitGroup.Add(1)
go controller.establishTunnelWorker()
}
controller.establishWaitGroup.Add(1)
go controller.establishCandidateGenerator()
}
// stopEstablishing signals the establish goroutines to stop and waits
// for the group to halt.
func (controller *Controller) stopEstablishing() {
if !controller.isEstablishing {
return
}
NoticeInfo("stop establishing")
controller.stopEstablish()
// Note: establishCandidateGenerator closes controller.candidateServerEntries
// (as it may be sending to that channel).
controller.establishWaitGroup.Wait()
NoticeInfo("stopped establishing")
controller.isEstablishing = false
controller.establishCtx = nil
controller.stopEstablish = nil
controller.establishWaitGroup = nil
controller.candidateServerEntries = nil
controller.serverAffinityDoneBroadcast = nil
controller.concurrentEstablishTunnelsMutex.Lock()
peakConcurrent := controller.peakConcurrentEstablishTunnels
peakConcurrentIntensive := controller.peakConcurrentIntensiveEstablishTunnels
controller.establishConnectTunnelCount = 0
controller.concurrentEstablishTunnels = 0
controller.concurrentIntensiveEstablishTunnels = 0
controller.peakConcurrentEstablishTunnels = 0
controller.peakConcurrentIntensiveEstablishTunnels = 0
controller.concurrentEstablishTunnelsMutex.Unlock()
NoticeInfo("peak concurrent establish tunnels: %d", peakConcurrent)
NoticeInfo("peak concurrent resource intensive establish tunnels: %d", peakConcurrentIntensive)
emitMemoryMetrics()
DoGarbageCollection()
}
func (controller *Controller) getTactics(done chan struct{}) {
defer controller.establishWaitGroup.Done()
defer close(done)
// Limitation: GetNetworkID may not account for device VPN status, so
// Psiphon-over-Psiphon or Psiphon-over-other-VPN scenarios can encounter
// this issue:
//
// 1. Tactics are established when tunneling through a VPN and egressing
// through a remote region/ISP.
// 2. Psiphon is next run when _not_ tunneling through the VPN. Yet the
// network ID remains the same. Initial applied tactics will be for the
// remote egress region/ISP, not the local region/ISP.
tacticsRecord, err := tactics.UseStoredTactics(
GetTacticsStorer(),
controller.config.GetNetworkID())
if err != nil {
NoticeAlert("get stored tactics failed: %s", err)
// The error will be due to a local datastore problem.
// While we could proceed with the tactics request, this
// could result in constant tactics requests. So, abort.
return
}
if tacticsRecord == nil {
iterator, err := NewTacticsServerEntryIterator(
controller.config)
if err != nil {
NoticeAlert("tactics iterator failed: %s", err)
return
}
defer iterator.Close()
for iteration := 0; ; iteration++ {
if !WaitForNetworkConnectivity(
controller.runCtx,
controller.config.NetworkConnectivityChecker) {
return
}
serverEntry, err := iterator.Next()
if err != nil {
NoticeAlert("tactics iterator failed: %s", err)
return
}
if serverEntry == nil {
if iteration == 0 {
NoticeAlert("tactics request skipped: no capable servers")
return
}
iterator.Reset()
continue
}
tacticsRecord, err = controller.doFetchTactics(serverEntry)
if err == nil {
break
}
NoticeAlert("tactics request failed: %s", err)
// On error, proceed with a retry, as the error is likely
// due to a network failure.
//
// TODO: distinguish network and local errors and abort
// on local errors.
p := controller.config.clientParameters.Get()
timeout := prng.JitterDuration(
p.Duration(parameters.TacticsRetryPeriod),
p.Float(parameters.TacticsRetryPeriodJitter))
p = nil
tacticsRetryDelay := time.NewTimer(timeout)
select {
case <-controller.establishCtx.Done():
return
case <-tacticsRetryDelay.C:
}
tacticsRetryDelay.Stop()
}
}
if tacticsRecord != nil &&
prng.FlipWeightedCoin(tacticsRecord.Tactics.Probability) {
err := controller.config.SetClientParameters(
tacticsRecord.Tag, true, tacticsRecord.Tactics.Parameters)
if err != nil {
NoticeAlert("apply tactics failed: %s", err)
// The error will be due to invalid tactics values from
// the server. When ApplyClientParameters fails, all
// previous tactics values are left in place. Abort
// without retry since the server is highly unlikely
// to return different values immediately.
return
}
}
// Reclaim memory from the completed tactics request as we're likely
// to be proceeding to the memory-intensive tunnel establishment phase.
DoGarbageCollection()
emitMemoryMetrics()
}
func (controller *Controller) doFetchTactics(
serverEntry *protocol.ServerEntry) (*tactics.Record, error) {
canReplay := func(serverEntry *protocol.ServerEntry, replayProtocol string) bool {
return common.Contains(
serverEntry.GetSupportedTacticsProtocols(), replayProtocol)
}
selectProtocol := func(serverEntry *protocol.ServerEntry) (string, bool) {
tacticsProtocols := serverEntry.GetSupportedTacticsProtocols()
if len(tacticsProtocols) == 0 {
return "", false
}
index := prng.Intn(len(tacticsProtocols))
return tacticsProtocols[index], true
}
dialParams, err := MakeDialParameters(
controller.config,
canReplay,
selectProtocol,
serverEntry,
true,
0)
if dialParams == nil {
// MakeDialParameters may return nil, nil when the server entry can't
// satisfy protocol selection criteria. This case is not expected
// since NewTacticsServerEntryIterator should only return tactics-
// capable server entries and selectProtocol will select any tactics
// protocol.
err = errors.New("failed to make dial parameters")
}
if err != nil {
return nil, common.ContextError(err)
}
NoticeRequestingTactics(dialParams)
// TacticsTimeout should be a very long timeout, since it's not
// adjusted by tactics in a new network context, and so clients
// with very slow connections must be accommodated. This long
// timeout will not entirely block the beginning of tunnel
// establishment, which begins after the shorter TacticsWaitPeriod.
//
// Using controller.establishCtx will cancel FetchTactics
// if tunnel establishment completes first.
timeout := controller.config.clientParameters.Get().Duration(
parameters.TacticsTimeout)
ctx, cancelFunc := context.WithTimeout(
controller.establishCtx,
timeout)
defer cancelFunc()
// DialMeek completes the TCP/TLS handshakes for HTTPS
// meek protocols but _not_ for HTTP meek protocols.
//
// TODO: pre-dial HTTP protocols to conform with speed
// test RTT spec.
//
// TODO: ensure that meek in round trip mode will fail
// the request when the pre-dial connection is broken,
// to minimize the possibility of network ID mismatches.
meekConn, err := DialMeek(
ctx, dialParams.GetMeekConfig(), dialParams.GetDialConfig())
if err != nil {
return nil, common.ContextError(err)
}
defer meekConn.Close()
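// Include the base client API parameters with the tactics request.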
apiParams := getBaseAPIParameters(controller.config, dialParams)
tacticsRecord, err := tactics.FetchTactics(
ctx,
controller.config.clientParameters,
GetTacticsStorer(),
controller.config.GetNetworkID,
apiParams,
serverEntry.Region,
dialParams.TunnelProtocol,
serverEntry.TacticsRequestPublicKey,
serverEntry.TacticsRequestObfuscatedKey,
meekConn.RoundTrip)
if err != nil {
return nil, common.ContextError(err)
}
NoticeRequestedTactics(dialParams)
return tacticsRecord, nil
}
// establishCandidateGenerator populates the candidate queue with server entries
// from the data store. Server entries are iterated in rank order, so that promoted
// servers with higher rank are priority candidates.
func (controller *Controller) establishCandidateGenerator() {
defer controller.establishWaitGroup.Done()
defer close(controller.candidateServerEntries)
// establishStartTime is used to calculate and report the
// client's tunnel establishment duration.
//
// networkWaitDuration is the elapsed time spent waiting
// for network connectivity. This duration will be excluded
// from reported tunnel establishment duration.
establishStartTime := monotime.Now()
var totalNetworkWaitDuration time.Duration
applyServerAffinity, iterator, err := NewServerEntryIterator(controller.config)
if err != nil {
NoticeAlert("failed to iterate over candidates: %s", err)
controller.SignalComponentFailure()
return
}
defer iterator.Close()
// TODO: reconcile server affinity scheme with multi-tunnel mode
if controller.config.TunnelPoolSize > 1 {
applyServerAffinity = false
}
isServerAffinityCandidate := true
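// When server affinity is not applied, close the broadcast channel
// immediately so that no worker waits on an affinity candidate outcome.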
if !applyServerAffinity {
isServerAffinityCandidate = false
close(controller.serverAffinityDoneBroadcast)
}
loop:
// Repeat until stopped
for {
// For diagnostics, emit counts of known server entries that satisfy
// both the egress region and tunnel protocol requirements (excluding
// excludeIntensive logic).
// Counts may change during establishment due to remote server
// list fetches, etc.
initialCount, count := CountServerEntriesWithConstraints(
controller.config.UseUpstreamProxy(),
controller.config.EgressRegion,
controller.protocolSelectionConstraints)
NoticeCandidateServers(
controller.config.EgressRegion,
controller.protocolSelectionConstraints,
initialCount,
count)
// A "round" consists of a new shuffle of the server entries
// and attempted connections up to the end of the server entry
// list, or parameters.EstablishTunnelWorkTime elapsed. Time
// spent waiting for network connectivity is excluded from
// round elapsed time.
//
// If the first round ends with no connection, remote server
// list and upgrade checks are launched.
roundStartTime := monotime.Now()
var roundNetworkWaitDuration time.Duration
// Send each iterator server entry to the establish workers
for {
networkWaitStartTime := monotime.Now()
if !WaitForNetworkConnectivity(
controller.establishCtx,
controller.config.NetworkConnectivityChecker) {
break loop
}
networkWaitDuration := monotime.Since(networkWaitStartTime)
roundNetworkWaitDuration += networkWaitDuration
totalNetworkWaitDuration += networkWaitDuration
serverEntry, err := iterator.Next()
if err != nil {
NoticeAlert("failed to get next candidate: %s", err)
controller.SignalComponentFailure()
break loop
}
if serverEntry == nil {
// Completed this iteration
break
}
if controller.config.TargetApiProtocol == protocol.PSIPHON_SSH_API_PROTOCOL &&
!serverEntry.SupportsSSHAPIRequests() {
continue
}
// adjustedEstablishStartTime is establishStartTime shifted
// to exclude time spent waiting for network connectivity.
adjustedEstablishStartTime := establishStartTime.Add(totalNetworkWaitDuration)
candidate := &candidateServerEntry{
serverEntry: serverEntry,
isServerAffinityCandidate: isServerAffinityCandidate,
adjustedEstablishStartTime: adjustedEstablishStartTime,
}
wasServerAffinityCandidate := isServerAffinityCandidate
// Note: there must be only one server affinity candidate, as it
// closes the serverAffinityDoneBroadcast channel.
isServerAffinityCandidate = false
// TODO: here we could generate multiple candidates from the
// server entry when there are many MeekFrontingAddresses.
select {
case controller.candidateServerEntries <- candidate:
case <-controller.establishCtx.Done():
break loop
}
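// End the round once EstablishTunnelWorkTime has elapsed, with time
// spent waiting for network connectivity excluded from the budget.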
workTime := controller.config.clientParameters.Get().Duration(
parameters.EstablishTunnelWorkTime)
if roundStartTime.Add(-roundNetworkWaitDuration).Add(workTime).Before(monotime.Now()) {
// Start over, after a brief pause, with a new shuffle of the server
// entries, and potentially some newly fetched server entries.
break
}
if wasServerAffinityCandidate {
// Don't start the next candidate until either the server affinity
// candidate has completed (success or failure) or is still working
// and the grace period has elapsed.
gracePeriod := controller.config.clientParameters.Get().Duration(
parameters.EstablishTunnelServerAffinityGracePeriod)
if gracePeriod > 0 {
timer := time.NewTimer(gracePeriod)
select {
case <-timer.C:
case <-controller.serverAffinityDoneBroadcast:
case <-controller.establishCtx.Done():
timer.Stop()
break loop
}
timer.Stop()
}
}
}
// Free up resources now, but don't reset until after the pause.
iterator.Close()
// Trigger RSL, OSL, and upgrade checks after failing to establish a
// tunnel in the first round.
controller.triggerFetches()
// After a complete iteration of candidate servers, pause before iterating again.
// This helps avoid some busy wait loop conditions, and also allows some time for
// network conditions to change. This also allows time for a remote server
// list fetch to complete under typical conditions (waiting for it is not
// strictly necessary; there will be more rounds if required).
p := controller.config.clientParameters.Get()
timeout := prng.JitterDuration(
p.Duration(parameters.EstablishTunnelPausePeriod),
p.Float(parameters.EstablishTunnelPausePeriodJitter))
p = nil
timer := time.NewTimer(timeout)
select {
case <-timer.C:
// Retry iterating
case <-controller.establishCtx.Done():
timer.Stop()
break loop
}
timer.Stop()
iterator.Reset()
}
}
// establishTunnelWorker pulls candidates from the candidate queue, establishes
// a connection to the tunnel server, and delivers the connected tunnel to a channel.
func (controller *Controller) establishTunnelWorker() {
defer controller.establishWaitGroup.Done()
loop:
for candidateServerEntry := range controller.candidateServerEntries {
// Note: don't check isStopEstablishing in the same select that receives
// from candidateServerEntries, since we want to prioritize the stop signal.
if controller.isStopEstablishing() {
break loop
}
// There may already be a tunnel to this candidate. If so, skip it.
if controller.isActiveTunnelServerEntry(candidateServerEntry.serverEntry) {
continue
}
// Select the tunnel protocol. The selection will be made at random
// from protocols supported by the server entry, optionally limited by
// LimitTunnelProtocols.
//
// When limiting concurrent resource intensive protocol connection
// workers, and at the limit, do not select resource intensive
// protocols since otherwise the candidate must be skipped.
//
// If at the limit and unable to select a non-intensive protocol,
// skip the candidate entirely and move on to the next. Since
// candidates are shuffled it's likely that the next candidate is not
// intensive. In this case, a StaggerConnectionWorkersMilliseconds
// delay may still be incurred.
limitIntensiveConnectionWorkers := controller.config.clientParameters.Get().Int(
parameters.LimitIntensiveConnectionWorkers)
controller.concurrentEstablishTunnelsMutex.Lock()
excludeIntensive := false
if limitIntensiveConnectionWorkers > 0 &&
controller.concurrentIntensiveEstablishTunnels >= limitIntensiveConnectionWorkers {
excludeIntensive = true
}
canReplay := func(serverEntry *protocol.ServerEntry, replayProtocol string) bool {
return controller.protocolSelectionConstraints.canReplay(
controller.establishConnectTunnelCount,
excludeIntensive,
serverEntry,
replayProtocol)
}
selectProtocol := func(serverEntry *protocol.ServerEntry) (string, bool) {
return controller.protocolSelectionConstraints.selectProtocol(
controller.establishConnectTunnelCount,
excludeIntensive,
serverEntry)
}
// MakeDialParameters may return a replay instance, if the server
// entry has a previous, recent successful connection and
// tactics/config has not changed.
//
// In the first round of establishment, ServerEntryIterator will move
// potential replay candidates to the front of the iterator after the
// random shuffle, which greatly prioritizes previously successful
// servers for that round.
//
// As ServerEntryIterator does not unmarshal and validate replay
// candidate dial parameters, some potential replay candidates may
// have expired or otherwise ineligible dial parameters; in this case
// the candidate proceeds without replay.
//
// The ReplayCandidateCount tactic determines how many candidates may
// use replay. After ReplayCandidateCount candidates of any type,
// replay or not, replay is skipped. If ReplayCandidateCount exceeds the
// initial round, replay may still be performed, but the iterator no
// longer moves potential replay server entries to the front.
dialParams, err := MakeDialParameters(
controller.config,
canReplay,
selectProtocol,
candidateServerEntry.serverEntry,
false,
controller.establishConnectTunnelCount)
if dialParams == nil || err != nil {
controller.concurrentEstablishTunnelsMutex.Unlock()
// dialParams is nil when the server does not support any protocol
// that remains after applying the LimitTunnelProtocols parameter
// and the excludeIntensive flag.
// Silently skip the candidate in this case; otherwise, emit the error.
if err != nil {
NoticeInfo("failed to select protocol for %s: %s",
candidateServerEntry.serverEntry.IpAddress, err)
}
// Unblock other candidates immediately when server affinity
// candidate is skipped.
if candidateServerEntry.isServerAffinityCandidate {
close(controller.serverAffinityDoneBroadcast)
}
continue
}
// Increment establishConnectTunnelCount only after selectProtocol has
// succeeded to ensure InitialLimitTunnelProtocolsCandidateCount
// candidates use InitialLimitTunnelProtocols.
establishConnectTunnelCount := controller.establishConnectTunnelCount
controller.establishConnectTunnelCount += 1
isIntensive := protocol.TunnelProtocolIsResourceIntensive(dialParams.TunnelProtocol)
if isIntensive {
controller.concurrentIntensiveEstablishTunnels += 1
if controller.concurrentIntensiveEstablishTunnels > controller.peakConcurrentIntensiveEstablishTunnels {
controller.peakConcurrentIntensiveEstablishTunnels = controller.concurrentIntensiveEstablishTunnels
}
}
controller.concurrentEstablishTunnels += 1
if controller.concurrentEstablishTunnels > controller.peakConcurrentEstablishTunnels {
controller.peakConcurrentEstablishTunnels = controller.concurrentEstablishTunnels
}
controller.concurrentEstablishTunnelsMutex.Unlock()
// Apply stagger only now that we're past MakeDialParameters and
// protocol selection logic which may have caused the candidate to be
// skipped. The stagger logic delays dialing, and we don't want to
// incur that delay when skipping.
//
// Locking staggerMutex serializes staggers, so that multiple workers
// don't simply sleep in parallel.
//
// The stagger is applied when establishConnectTunnelCount > 0 -- that
// is, for all but the first dial.
p := controller.config.clientParameters.Get()
staggerPeriod := p.Duration(parameters.StaggerConnectionWorkersPeriod)
staggerJitter := p.Float(parameters.StaggerConnectionWorkersJitter)
p = nil
if establishConnectTunnelCount > 0 && staggerPeriod != 0 {
controller.staggerMutex.Lock()
timer := time.NewTimer(prng.JitterDuration(staggerPeriod, staggerJitter))
select {
case <-timer.C:
case <-controller.establishCtx.Done():
}
timer.Stop()
controller.staggerMutex.Unlock()
}
// ConnectTunnel will allocate significant memory, so first attempt to
// reclaim as much as possible.
DoGarbageCollection()
tunnel, err := ConnectTunnel(
controller.establishCtx,
controller.config,
candidateServerEntry.adjustedEstablishStartTime,
dialParams)
controller.concurrentEstablishTunnelsMutex.Lock()
if isIntensive {
controller.concurrentIntensiveEstablishTunnels -= 1
}
controller.concurrentEstablishTunnels -= 1
controller.concurrentEstablishTunnelsMutex.Unlock()
// Periodically emit memory metrics during the establishment cycle.
if !controller.isStopEstablishing() {
emitMemoryMetrics()
}
// Immediately reclaim memory allocated by the establishment. In the case
// of failure, first clear the reference to the tunnel. In the case of
// success, the garbage collection may still be effective as the initial
// phases of some protocols involve significant memory allocation that
// could now be reclaimed.
if err != nil {
tunnel = nil
}
DoGarbageCollection()
if err != nil {
// Unblock other candidates immediately when server affinity
// candidate fails.
if candidateServerEntry.isServerAffinityCandidate {
close(controller.serverAffinityDoneBroadcast)
}
// Before emitting error, check if establish interrupted, in which
// case the error is noise.
if controller.isStopEstablishing() {
break loop
}
NoticeInfo("failed to connect to %s: %s",
candidateServerEntry.serverEntry.IpAddress, err)
continue
}
// Deliver connected tunnel.
// Don't block. Assumes the receiver has a buffer large enough for
// the number of desired tunnels. If there's no room, the tunnel must
// not be required so it's discarded.
select {
case controller.connectedTunnels <- tunnel:
default:
controller.discardTunnel(tunnel)
// Clear the reference to this discarded tunnel and immediately run
// a garbage collection to reclaim its memory.
tunnel = nil
DoGarbageCollection()
}
// Unblock other candidates only after delivering when
// server affinity candidate succeeds.
if candidateServerEntry.isServerAffinityCandidate {
close(controller.serverAffinityDoneBroadcast)
}
}
}
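// isStopEstablishing returns true when tunnel establishment has been
// cancelled. The default case makes this check non-blocking.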
func (controller *Controller) isStopEstablishing() bool {
select {
case <-controller.establishCtx.Done():
return true
default:
}
return false
}