custom: {}
hub:
service:
type: ClusterIP
annotations: {}
ports:
nodePort:
loadBalancerIP:
baseUrl: /
cookieSecret:
publicURL:
initContainers: []
uid: 1000
fsGid: 1000
nodeSelector: {}
concurrentSpawnLimit: 64
consecutiveFailureLimit: 5
activeServerLimit:
deploymentStrategy:
## type: Recreate
## - sqlite-pvc backed hubs require the Recreate deployment strategy, as a
##   typical PVC storage can only be bound to one pod at a time.
## - JupyterHub isn't designed to be run in parallel. More work needs to be
##   done in JupyterHub itself before a fully highly available (HA)
##   deployment of JupyterHub on k8s is possible.
type: Recreate
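## Hedged sketch: if you want to avoid the sqlite-pvc/Recreate coupling
## described above, the db block below can point the hub at an external
## database instead. Host, user and database names here are placeholders;
## check the chart documentation for the exact URL format your database
## needs:
# db:
#   type: postgres
#   url: postgres+psycopg2://jupyterhub@db.example.org:5432/jupyterhub
#   password: set-via-hub.db.password-or-an-existing-secret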
db:
type: sqlite-pvc
upgrade:
pvc:
annotations: {}
selector: {}
accessModes:
- ReadWriteOnce
storage: 1Gi
subPath:
storageClassName: nfs
url:
password:
labels: {}
annotations: {}
extraConfig: {}
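## Hedged sketch: extraConfig takes named snippets of Python that are
## appended to jupyterhub_config.py. The snippet name and the traitlet
## below are only illustrations:
# extraConfig:
#   myConfig: |
#     c.KubeSpawner.start_timeout = 600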
extraConfigMap: {}
extraEnv: {}
extraContainers: []
extraVolumes: []
extraVolumeMounts: []
image:
name: jupyterhub/k8s-hub
tag: '0.9.1'
# pullSecrets:
# - secretName
resources:
requests:
cpu: 200m
memory: 512Mi
services: {}
imagePullSecret:
enabled: false
registry:
username:
email:
password:
pdb:
enabled: true
minAvailable: 1
networkPolicy:
enabled: false
ingress: []
egress:
- to:
- ipBlock:
cidr: 0.0.0.0/0
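## Hedged sketch: extra ingress rules use the standard networking.k8s.io/v1
## NetworkPolicy peer syntax. The label below is the one this chart already
## applies to pods that need to reach the hub:
# ingress:
#   - from:
#       - podSelector:
#           matchLabels:
#             hub.jupyter.org/network-access-hub: 'true'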
allowNamedServers: false
namedServerLimitPerUser:
authenticatePrometheus:
redirectToServer:
shutdownOnLogout:
templatePaths: []
templateVars: {}
livenessProbe:
enabled: false
initialDelaySeconds: 30
periodSeconds: 10
readinessProbe:
enabled: true
initialDelaySeconds: 0
periodSeconds: 10
# existingSecret: existing-secret
rbac:
enabled: true
proxy:
secretToken: ''
deploymentStrategy:
## type: Recreate
## - JupyterHub's interaction with the CHP proxy becomes a lot more robust
##   with this configuration. To understand this, consider that JupyterHub
##   during startup interacts heavily with the k8s service to reach a ready
##   proxy pod. If the hub pod restarts during a helm upgrade while the
##   proxy pod is doing a rolling upgrade, the hub pod could start a
##   sequence of interactions against the old proxy pod and finish it
##   against the new one. Because CHP proxy pods carry individual state,
##   this is very error prone. One observed outcome of not using the
##   Recreate strategy is that the hub pod deleted user pods it considered
##   unreachable, because it had only registered them with the old proxy
##   pod, not the new one, before trying to reach them.
type: Recreate
## rollingUpdate:
## - WARNING:
##   This must be set explicitly blank! Without it being explicitly blank,
##   k8s will let any old values under rollingUpdate remain, the Deployment
##   becomes invalid, and a helm upgrade would fail with an error like
##   this:
##
## UPGRADE FAILED
## Error: Deployment.apps "proxy" is invalid: spec.strategy.rollingUpdate: Forbidden: may not be specified when strategy `type` is 'Recreate'
## Error: UPGRADE FAILED: Deployment.apps "proxy" is invalid: spec.strategy.rollingUpdate: Forbidden: may not be specified when strategy `type` is 'Recreate'
rollingUpdate:
service:
type: LoadBalancer
labels: {}
annotations: {}
nodePorts:
http:
https:
loadBalancerIP:
loadBalancerSourceRanges: []
chp:
image:
name: jupyterhub/configurable-http-proxy
tag: 4.2.1
livenessProbe:
enabled: true
initialDelaySeconds: 30
periodSeconds: 10
readinessProbe:
enabled: true
initialDelaySeconds: 0
periodSeconds: 10
resources:
requests:
cpu: 200m
memory: 512Mi
traefik:
image:
name: traefik
tag: v2.1
hsts:
maxAge: 15724800 # About 6 months
includeSubdomains: false
resources: {}
secretSync:
image:
name: jupyterhub/k8s-secret-sync
tag: '0.9.1'
resources: {}
labels: {}
nodeSelector: {}
pdb:
enabled: true
minAvailable: 1
https:
enabled: true
type: letsencrypt
#type: letsencrypt, manual, offload, secret
letsencrypt:
contactEmail: ''
# Specify a custom ACME server here, e.g. https://acme-staging-v02.api.letsencrypt.org/directory to hit the Let's Encrypt staging environment
acmeServer: ''
manual:
key:
cert:
secret:
name: ''
key: tls.key
crt: tls.crt
hosts: []
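## Hedged sketch: a minimal Let's Encrypt setup only needs a contact email
## and the public hostnames the proxy serves. The hostname and email below
## are placeholders:
# https:
#   enabled: true
#   type: letsencrypt
#   letsencrypt:
#     contactEmail: admin@example.org
#   hosts:
#     - hub.example.org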
networkPolicy:
enabled: false
ingress: []
egress:
- to:
- ipBlock:
cidr: 0.0.0.0/0
auth:
type: dummy
whitelist:
users:
admin:
access: true
users:
dummy:
password:
ldap:
dn:
search: {}
user: {}
user: {}
state:
enabled: false
cryptoKey:
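## Hedged sketch: with the dummy authenticator, a shared password plus an
## explicit whitelist and admin list might look like this. The usernames
## and password are placeholders:
# auth:
#   type: dummy
#   dummy:
#     password: a-shared-placeholder-password
#   admin:
#     users:
#       - alice
#   whitelist:
#     users:
#       - alice
#       - bob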
singleuser:
extraTolerations: []
nodeSelector: {}
extraNodeAffinity:
required: []
preferred: []
extraPodAffinity:
required: []
preferred: []
extraPodAntiAffinity:
required: []
preferred: []
networkTools:
image:
name: jupyterhub/k8s-network-tools
tag: '0.9.1'
cloudMetadata:
enabled: false
ip: 169.254.169.254
networkPolicy:
enabled: false
ingress: []
egress:
# Required egress is handled by other rules so it's safe to modify this
- to:
- ipBlock:
cidr: 0.0.0.0/0
except:
- 169.254.169.254/32
events: true
extraAnnotations: {}
extraLabels:
hub.jupyter.org/network-access-hub: 'true'
extraEnv: {}
lifecycleHooks: {}
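## Hedged sketch: lifecycleHooks are passed through to the user pod and use
## the standard k8s container lifecycle syntax. The command below is only
## an illustration:
# lifecycleHooks:
#   postStart:
#     exec:
#       command: ["sh", "-c", "echo pod started > /tmp/started"]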
initContainers: []
extraContainers: []
uid: 1000
fsGid: 100
serviceAccountName:
storage:
type: dynamic
extraLabels: {}
extraVolumes: []
extraVolumeMounts: []
static:
pvcName:
subPath: '{username}'
capacity: 10Gi
homeMountPath: /home/jovyan
dynamic:
storageClass:
pvcNameTemplate: claim-{username}{servername}
volumeNameTemplate: volume-{username}{servername}
storageAccessModes: [ReadWriteOnce]
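## Hedged sketch: to reuse one pre-existing PVC for all users instead of
## dynamic provisioning, the static type references it by name and mounts a
## per-user subPath. The PVC name below is a placeholder:
# storage:
#   type: static
#   static:
#     pvcName: shared-home-pvc
#     subPath: '{username}'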
image:
name: jupyterhub/k8s-singleuser-sample
tag: '0.9.1'
pullPolicy: IfNotPresent
# pullSecrets:
# - secretName
imagePullSecret:
enabled: false
registry:
username:
email:
password:
startTimeout: 300
cpu:
limit:
guarantee:
memory:
limit:
guarantee: 1G
extraResource:
limits: {}
guarantees: {}
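## Hedged sketch: typical resource settings give each user a guaranteed
## slice and a hard cap; the numbers below are placeholders to adjust for
## your cluster:
# cpu:
#   limit: 2
#   guarantee: 0.5
# memory:
#   limit: 2G
#   guarantee: 1G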
cmd: jupyterhub-singleuser
defaultUrl:
extraPodConfig: {}
scheduling:
userScheduler:
enabled: true
replicas: 2
logLevel: 4
## policy:
## Allows you to provide custom YAML/JSON to render into a JSON policy.cfg,
## a configuration file for the kube-scheduler binary.
## NOTE: The kube-scheduler binary in the image we are currently using may
## be version bumped, for example if we raise the lowest supported k8s
## version for the helm chart. If that happens, the provided policy.cfg may
## need to change along with it due to breaking changes in the
## kube-scheduler binary.
policy: {}
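## Hedged sketch of the v1 Policy format used by kube-scheduler around
## k8s 1.13. Note that a supplied policy replaces the defaults, so list
## every predicate and priority you still want; the selection below is only
## an illustration:
# policy:
#   kind: Policy
#   apiVersion: v1
#   predicates:
#     - name: PodFitsResources
#     - name: MatchNodeSelector
#     - name: NoDiskConflict
#   priorities:
#     - name: MostRequestedPriority
#       weight: 100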
image:
name: registry.cn-hangzhou.aliyuncs.com/zqqq/google_containers_kube-scheduler-amd64
tag: v1.13.12
nodeSelector: {}
pdb:
enabled: true
minAvailable: 1
resources:
requests:
cpu: 50m
memory: 256Mi
podPriority:
enabled: false
globalDefault: false
defaultPriority: 0
userPlaceholderPriority: -10
userPlaceholder:
enabled: true
replicas: 0
corePods:
nodeAffinity:
matchNodePurpose: prefer
userPods:
nodeAffinity:
matchNodePurpose: prefer
prePuller:
hook:
enabled: true
image:
name: jupyterhub/k8s-image-awaiter
tag: '0.9.1'
continuous:
enabled: true
extraImages: {}
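## Hedged sketch: extraImages lets the pre-puller pull additional images on
## every node. The entry name, image and tag below are placeholders; check
## the chart documentation for any extra fields your version supports:
# extraImages:
#   myExtraImage:
#     name: jupyter/all-spark-notebook
#     tag: latest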
pause:
image:
name: registry.cn-hangzhou.aliyuncs.com/zqqq/google_containers_pause
tag: '3.1'
ingress:
enabled: false
annotations: {}
hosts: []
pathSuffix: ''
tls: []
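## Hedged sketch: the ingress hosts and tls entries follow the standard k8s
## Ingress spec. The hostname and secret name below are placeholders:
# ingress:
#   enabled: true
#   hosts:
#     - hub.example.org
#   tls:
#     - hosts:
#         - hub.example.org
#       secretName: hub-example-org-tls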
cull:
enabled: true
users: false
removeNamedServers: false
timeout: 3600
every: 600
concurrency: 10
maxAge: 0
debug:
enabled: false