chart version: 2.0.0
hub error log
[W 2023-05-11 08:27:49.529 JupyterHub app:2860] init_spawners did not complete within 10 seconds. Allowing to complete in the background.
[I 2023-05-11 08:27:49.529 JupyterHub app:3057] Not starting proxy
[I 2023-05-11 08:27:49.553 JupyterHub app:3093] Hub API listening on http://:8081/hub/
[I 2023-05-11 08:27:49.553 JupyterHub app:3095] Private Hub API connect url http://hub:8081/hub/
[I 2023-05-11 08:27:49.554 JupyterHub app:3104] Starting managed service jupyterhub-idle-culler
[I 2023-05-11 08:27:49.556 JupyterHub service:385] Starting service 'jupyterhub-idle-culler': ['python3', '-m', 'jupyterhub_idle_culler', '--url=http://localhost:8081/hub/api', '--timeout=3600', '--cull-every=600', '--concurrency=10']
[I 2023-05-11 08:27:49.563 JupyterHub service:133] Spawning python3 -m jupyterhub_idle_culler --url=http://localhost:8081/hub/api --timeout=3600 --cull-every=600 --concurrency=10
[I 2023-05-11 08:27:49.581 JupyterHub proxy:480] Adding route for Hub: / => http://hub:8081
[I 2023-05-11 08:27:49.589 JupyterHub app:3162] JupyterHub is now running, internal Hub API at http://hub:8081/hub/
[I 2023-05-11 08:27:49.753 JupyterHub log:186] 200 GET /hub/api/ (jupyterhub-idle-culler@::1) 26.91ms
[I 2023-05-11 08:27:49.763 JupyterHub log:186] 200 GET /hub/api/users?state=[secret] (jupyterhub-idle-culler@::1) 7.64ms
[E 2023-05-11 08:28:39.630 JupyterHub reflector:385] Initial list of events failed
Traceback (most recent call last):
  File "/usr/local/lib/python3.9/site-packages/kubespawner/reflector.py", line 383, in start
    await self._list_and_update()
  File "/usr/local/lib/python3.9/site-packages/kubespawner/reflector.py", line 228, in _list_and_update
    initial_resources_raw = await list_method(**kwargs)
  File "/usr/local/lib/python3.9/site-packages/kubernetes_asyncio/client/api_client.py", line 185, in __call_api
    response_data = await self.request(
  File "/usr/local/lib/python3.9/site-packages/kubernetes_asyncio/client/rest.py", line 193, in GET
    return (await self.request("GET", url,
  File "/usr/local/lib/python3.9/site-packages/kubernetes_asyncio/client/rest.py", line 177, in request
    r = await self.pool_manager.request(**args)
  File "/usr/local/lib/python3.9/site-packages/aiohttp/client.py", line 634, in _request
    break
  File "/usr/local/lib/python3.9/site-packages/aiohttp/helpers.py", line 721, in __exit__
    raise asyncio.TimeoutError from None
asyncio.exceptions.TimeoutError
[E 2023-05-11 08:28:39.632 JupyterHub reflector:385] Initial list of pods failed
Traceback (most recent call last):
  File "/usr/local/lib/python3.9/site-packages/kubespawner/reflector.py", line 383, in start
    await self._list_and_update()
  File "/usr/local/lib/python3.9/site-packages/kubespawner/reflector.py", line 228, in _list_and_update
    initial_resources_raw = await list_method(**kwargs)
  File "/usr/local/lib/python3.9/site-packages/kubernetes_asyncio/client/api_client.py", line 185, in __call_api
    response_data = await self.request(
  File "/usr/local/lib/python3.9/site-packages/kubernetes_asyncio/client/rest.py", line 193, in GET
    return (await self.request("GET", url,
  File "/usr/local/lib/python3.9/site-packages/kubernetes_asyncio/client/rest.py", line 177, in request
    r = await self.pool_manager.request(**args)
  File "/usr/local/lib/python3.9/site-packages/aiohttp/client.py", line 634, in _request
    break
  File "/usr/local/lib/python3.9/site-packages/aiohttp/helpers.py", line 721, in __exit__
    raise asyncio.TimeoutError from None
asyncio.exceptions.TimeoutError
[E 2023-05-11 08:28:39.632 JupyterHub spawner:2402] Reflector for events failed to start.
Traceback (most recent call last):
  File "/usr/local/lib/python3.9/site-packages/kubespawner/spawner.py", line 2400, in catch_reflector_start
    await f
  File "/usr/local/lib/python3.9/site-packages/kubespawner/reflector.py", line 383, in start
    await self._list_and_update()
  File "/usr/local/lib/python3.9/site-packages/kubespawner/reflector.py", line 228, in _list_and_update
    initial_resources_raw = await list_method(**kwargs)
  File "/usr/local/lib/python3.9/site-packages/kubernetes_asyncio/client/api_client.py", line 185, in __call_api
    response_data = await self.request(
  File "/usr/local/lib/python3.9/site-packages/kubernetes_asyncio/client/rest.py", line 193, in GET
    return (await self.request("GET", url,
  File "/usr/local/lib/python3.9/site-packages/kubernetes_asyncio/client/rest.py", line 177, in request
    r = await self.pool_manager.request(**args)
  File "/usr/local/lib/python3.9/site-packages/aiohttp/client.py", line 634, in _request
    break
  File "/usr/local/lib/python3.9/site-packages/aiohttp/helpers.py", line 721, in __exit__
    raise asyncio.TimeoutError from None
asyncio.exceptions.TimeoutError
Task was destroyed but it is pending!
task: <Task pending name='Task-5' coro=<KubeSpawner._start_reflector.<locals>.catch_reflector_start() running at /usr/local/lib/python3.9/site-packages/kubespawner/spawner.py:2400> wait_for=<Task finished name='Task-4' coro=<ResourceReflector.start() done, defined at /usr/local/lib/python3.9/site-packages/kubespawner/reflector.py:370> exception=TimeoutError()>>
Task exception was never retrieved
future: <Task finished name='Task-4' coro=<ResourceReflector.start() done, defined at /usr/local/lib/python3.9/site-packages/kubespawner/reflector.py:370> exception=TimeoutError()>
Traceback (most recent call last):
  File "/usr/local/lib/python3.9/site-packages/kubespawner/reflector.py", line 383, in start
    await self._list_and_update()
  File "/usr/local/lib/python3.9/site-packages/kubespawner/reflector.py", line 228, in _list_and_update
    initial_resources_raw = await list_method(**kwargs)
  File "/usr/local/lib/python3.9/site-packages/kubernetes_asyncio/client/api_client.py", line 185, in __call_api
    response_data = await self.request(
  File "/usr/local/lib/python3.9/site-packages/kubernetes_asyncio/client/rest.py", line 193, in GET
    return (await self.request("GET", url,
  File "/usr/local/lib/python3.9/site-packages/kubernetes_asyncio/client/rest.py", line 177, in request
    r = await self.pool_manager.request(**args)
  File "/usr/local/lib/python3.9/site-packages/aiohttp/client.py", line 634, in _request
    break
  File "/usr/local/lib/python3.9/site-packages/aiohttp/helpers.py", line 721, in __exit__
    raise asyncio.TimeoutError from None
asyncio.exceptions.TimeoutError
Task was destroyed but it is pending!
task: <Task pending name='Task-2' coro=<JupyterHub.init_spawners() running at /usr/local/lib/python3.9/site-packages/jupyterhub/app.py:2558> wait_for=<_GatheringFuture pending cb=[<TaskWakeupMethWrapper object at 0x7f471ab92070>()]> cb=[JupyterHub.initialize.<locals>.log_init_time() at /usr/local/lib/python3.9/site-packages/jupyterhub/app.py:2840, <3 more>, <TaskWakeupMethWrapper object at 0x7f471ab40700>()]>
Task was destroyed but it is pending!
task: <Task pending name='Task-3' coro=<shared_client.<locals>.close_client_task() running at /usr/local/lib/python3.9/site-packages/kubespawner/clients.py:58> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f471ab920d0>()]>>
Exception ignored in: <coroutine object shared_client.<locals>.close_client_task at 0x7f471e8c3ac0>
RuntimeError: coroutine ignored GeneratorExit
Task was destroyed but it is pending!
task: <Task pending name='Task-8' coro=<JupyterHub.init_spawners.<locals>.check_spawner() running at /usr/local/lib/python3.9/site-packages/jupyterhub/app.py:2459> wait_for=<Future finished exception=TimeoutError()> cb=[_gather.<locals>._done_callback() at /usr/local/lib/python3.9/asyncio/tasks.py:767]>
Task exception was never retrieved
future: <Task finished name='Task-7' coro=<KubeSpawner._start_reflector.<locals>.catch_reflector_start() done, defined at /usr/local/lib/python3.9/site-packages/kubespawner/spawner.py:2398> exception=SystemExit(1)>
Traceback (most recent call last):
  File "/usr/local/lib/python3.9/site-packages/kubespawner/spawner.py", line 2400, in catch_reflector_start
    await f
  File "/usr/local/lib/python3.9/site-packages/kubespawner/reflector.py", line 383, in start
    await self._list_and_update()
  File "/usr/local/lib/python3.9/site-packages/kubespawner/reflector.py", line 228, in _list_and_update
    initial_resources_raw = await list_method(**kwargs)
  File "/usr/local/lib/python3.9/site-packages/kubernetes_asyncio/client/api_client.py", line 185, in __call_api
    response_data = await self.request(
  File "/usr/local/lib/python3.9/site-packages/kubernetes_asyncio/client/rest.py", line 193, in GET
    return (await self.request("GET", url,
  File "/usr/local/lib/python3.9/site-packages/kubernetes_asyncio/client/rest.py", line 177, in request
    r = await self.pool_manager.request(**args)
  File "/usr/local/lib/python3.9/site-packages/aiohttp/client.py", line 634, in _request
    break
  File "/usr/local/lib/python3.9/site-packages/aiohttp/helpers.py", line 721, in __exit__
    raise asyncio.TimeoutError from None
asyncio.exceptions.TimeoutError
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
  File "/usr/local/lib/python3.9/site-packages/jupyterhub/app.py", line 3313, in launch_instance
    loop.start()
  File "/usr/local/lib/python3.9/site-packages/tornado/platform/asyncio.py", line 215, in start
    self.asyncio_loop.run_forever()
  File "/usr/local/lib/python3.9/asyncio/base_events.py", line 601, in run_forever
    self._run_once()
  File "/usr/local/lib/python3.9/asyncio/base_events.py", line 1905, in _run_once
    handle._run()
  File "/usr/local/lib/python3.9/asyncio/events.py", line 80, in _run
    self._context.run(self._callback, *self._args)
  File "/usr/local/lib/python3.9/site-packages/kubespawner/spawner.py", line 2403, in catch_reflector_start
    sys.exit(1)
SystemExit: 1
Task was destroyed but it is pending!
task: <Task pending name='Task-11' coro=<JupyterHub.initialize.<locals>.finish_init_spawners() running at /usr/local/lib/python3.9/site-packages/jupyterhub/app.py:2871> wait_for=<Task pending name='Task-2' coro=<JupyterHub.init_spawners() done, defined at /usr/local/lib/python3.9/site-packages/jupyterhub/app.py:2432> wait_for=<_GatheringFuture pending cb=[<TaskWakeupMethWrapper object at 0x7f471ab92070>()]> cb=[JupyterHub.initialize.<locals>.log_init_time() at /usr/local/lib/python3.9/site-packages/jupyterhub/app.py:2840, <3 more>, <TaskWakeupMethWrapper object at 0x7f471ab40700>()]>>
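
The repeated asyncio.exceptions.TimeoutError above comes from KubeSpawner's reflector: its initial listing of pods and events against the Kubernetes API never gets a response, and catch_reflector_start then calls sys.exit(1), which kills the hub. In case the API server is reachable but just slow, this is a sketch of what could be added to the values to give KubeSpawner more time per API request (assuming the kubespawner version bundled with chart 2.0.0 exposes these traits; the numbers are guesses, not verified):

hub:
  config:
    KubeSpawner:
      # assumed traits: per-request timeout and total retry window, in seconds
      k8s_api_request_timeout: 30
      k8s_api_request_retry_timeout: 120

If the hub pod cannot reach the API server at all (DNS, firewall, or a network policy dropping the traffic), raising these timeouts will not help and the same errors will just appear later.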
my chart values.yaml
fullnameOverride: ""
nameOverride:
custom: {}
imagePullSecret:
create: false
automaticReferenceInjection: true
registry:
username:
password:
email:
imagePullSecrets: []
hub:
revisionHistoryLimit:
config:
JupyterHub:
admin_access: true
authenticator_class: dummy
service:
type: ClusterIP
annotations: {}
ports:
nodePort:
extraPorts: []
loadBalancerIP:
baseUrl: /
cookieSecret:
initContainers: []
nodeSelector: {}
tolerations: []
concurrentSpawnLimit: 64
consecutiveFailureLimit: 5
activeServerLimit:
deploymentStrategy:
## type: Recreate
## - sqlite-pvc backed hubs require the Recreate deployment strategy as a
## typical PVC storage can only be bound to one pod at the time.
## - JupyterHub isn't designed to support being run in parallell. More work
## needs to be done in JupyterHub itself for a fully highly available (HA)
## deployment of JupyterHub on k8s is to be possible.
type: Recreate
  db:
    type: sqlite-pvc
    upgrade:
    pvc:
      annotations: {}
      selector: {}
      accessModes:
        - ReadWriteOnce
      storage: 1Gi
      subPath:
      storageClassName:
    url:
    password:
  labels: {}
  annotations: {}
  command: []
  args: []
  extraConfig: {}
  extraFiles: {}
  extraEnv: {}
  extraContainers: []
  extraVolumes: []
  extraVolumeMounts: []
  image:
    name: my_harbor/jupyterhub/k8s-hub
    tag: "2.0.0"
    pullPolicy:
    pullSecrets: []
  resources: {}
  podSecurityContext:
    fsGroup: 1000
  containerSecurityContext:
    runAsUser: 1000
    runAsGroup: 1000
    allowPrivilegeEscalation: false
  lifecycle: {}
  loadRoles: {}
  services: {}
  pdb:
    enabled: false
    maxUnavailable:
    minAvailable: 1
  networkPolicy:
    enabled: true
    ingress: []
    egress: []
    egressAllowRules:
      cloudMetadataServer: true
      dnsPortsPrivateIPs: true
      nonPrivateIPs: true
      privateIPs: true
    interNamespaceAccessLabels: ignore
    allowedIngressPorts: []
  allowNamedServers: false
  namedServerLimitPerUser:
  authenticatePrometheus:
  redirectToServer:
  shutdownOnLogout:
  templatePaths: []
  templateVars: {}
  livenessProbe:
    # The livenessProbe's aim is to give JupyterHub sufficient time to start up,
    # but to be able to restart it if it becomes unresponsive for ~5 min.
    enabled: true
    initialDelaySeconds: 300
    periodSeconds: 10
    failureThreshold: 30
    timeoutSeconds: 3
  readinessProbe:
    # The readinessProbe's aim is to provide a successful startup indication,
    # but following that never to become unready before its livenessProbe fails
    # and restarts it if needed. Becoming unready after startup serves no
    # purpose, as there is no other pod to fall back to in our non-HA deployment.
    enabled: true
    initialDelaySeconds: 0
    periodSeconds: 2
    failureThreshold: 1000
    timeoutSeconds: 1
  existingSecret:
  serviceAccount:
    create: true
    name:
    annotations: {}
  extraPodSpec: {}
rbac:
  create: true
proxy:
  secretToken:
  annotations: {}
  deploymentStrategy:
    ## type: Recreate
    ## - JupyterHub's interaction with the CHP proxy becomes a lot more robust
    ##   with this configuration. To understand this, consider that JupyterHub
    ##   during startup will interact a lot with the k8s service to reach a
    ##   ready proxy pod. If the hub pod is restarting during a helm upgrade
    ##   while the proxy pod is making a rolling upgrade, the hub pod could end
    ##   up starting a sequence of interactions against the old proxy pod and
    ##   finishing that sequence against the new proxy pod. As CHP proxy pods
    ##   carry individual state, this is very error prone. One outcome when not
    ##   using Recreate as a strategy has been that user pods have been deleted
    ##   by the hub pod because it considered them unreachable, as it had only
    ##   configured the old proxy pod but not the new one before trying to
    ##   reach them.
    type: Recreate
    ## rollingUpdate:
    ## - WARNING:
    ##   This is required to be set explicitly blank! Without it being
    ##   explicitly blank, k8s will let any old values under rollingUpdate
    ##   remain and the Deployment becomes invalid, so a helm upgrade would
    ##   fail with an error like this:
    ##
    ##     UPGRADE FAILED
    ##     Error: Deployment.apps "proxy" is invalid: spec.strategy.rollingUpdate: Forbidden: may not be specified when strategy `type` is 'Recreate'
    ##     Error: UPGRADE FAILED: Deployment.apps "proxy" is invalid: spec.strategy.rollingUpdate: Forbidden: may not be specified when strategy `type` is 'Recreate'
    rollingUpdate:
  # service relates to the proxy-public service
  service:
    type: LoadBalancer
    labels: {}
    annotations: {}
    nodePorts:
      http:
      https:
    disableHttpPort: false
    extraPorts: []
    loadBalancerIP:
    loadBalancerSourceRanges: []
  # chp relates to the proxy pod, which is responsible for routing traffic based
  # on dynamic configuration sent from JupyterHub to CHP's REST API.
  chp:
    revisionHistoryLimit:
    containerSecurityContext:
      runAsUser: 65534 # nobody user
      runAsGroup: 65534 # nobody group
      allowPrivilegeEscalation: false
    image:
      name: my_harbor/jupyterhub/configurable-http-proxy
      # tag is automatically bumped to new patch versions by the
      # watch-dependencies.yaml workflow.
      #
      tag: "4.5.3" # https://github.com/jupyterhub/configurable-http-proxy/releases
      pullPolicy:
      pullSecrets: []
    extraCommandLineFlags: []
    livenessProbe:
      enabled: true
      initialDelaySeconds: 60
      periodSeconds: 10
      failureThreshold: 30
      timeoutSeconds: 3
    readinessProbe:
      enabled: true
      initialDelaySeconds: 0
      periodSeconds: 2
      failureThreshold: 1000
      timeoutSeconds: 1
    resources: {}
    defaultTarget:
    errorTarget:
    extraEnv: {}
    nodeSelector: {}
    tolerations: []
    networkPolicy:
      enabled: true
      ingress: []
      egress: []
      egressAllowRules:
        cloudMetadataServer: true
        dnsPortsPrivateIPs: true
        nonPrivateIPs: true
        privateIPs: true
      interNamespaceAccessLabels: ignore
      allowedIngressPorts: [http, https]
    pdb:
      enabled: false
      maxUnavailable:
      minAvailable: 1
    extraPodSpec: {}
  # traefik relates to the autohttps pod, which is responsible for TLS
  # termination when proxy.https.type=letsencrypt.
  traefik:
    revisionHistoryLimit:
    containerSecurityContext:
      runAsUser: 65534 # nobody user
      runAsGroup: 65534 # nobody group
      allowPrivilegeEscalation: false
    image:
      name: my_harbor/ingress/traefik
      # tag is automatically bumped to new patch versions by the
      # watch-dependencies.yaml workflow.
      #
      tag: "v2.8.4" # ref: https://hub.docker.com/_/traefik?tab=tags
      pullPolicy:
      pullSecrets: []
    hsts:
      includeSubdomains: false
      preload: false
      maxAge: 15724800 # About 6 months
    resources: {}
    labels: {}
    extraInitContainers: []
    extraEnv: {}
    extraVolumes: []
    extraVolumeMounts: []
    extraStaticConfig: {}
    extraDynamicConfig: {}
    nodeSelector: {}
    tolerations: []
    extraPorts: []
    networkPolicy:
      enabled: true
      ingress: []
      egress: []
      egressAllowRules:
        cloudMetadataServer: true
        dnsPortsPrivateIPs: true
        nonPrivateIPs: true
        privateIPs: true
      interNamespaceAccessLabels: ignore
      allowedIngressPorts: [http, https]
    pdb:
      enabled: false
      maxUnavailable:
      minAvailable: 1
    serviceAccount:
      create: true
      name:
      annotations: {}
    extraPodSpec: {}
  secretSync:
    containerSecurityContext:
      runAsUser: 65534 # nobody user
      runAsGroup: 65534 # nobody group
      allowPrivilegeEscalation: false
    image:
      name: my_harbor/jupyterhub/k8s-secret-sync
      tag: "2.0.0"
      pullPolicy:
      pullSecrets: []
    resources: {}
  labels: {}
  https:
    enabled: false
    type: letsencrypt
    #type: letsencrypt, manual, offload, secret
    letsencrypt:
      contactEmail:
      # Specify custom server here (https://acme-staging-v02.api.letsencrypt.org/directory) to hit staging LE
      acmeServer: https://acme-v02.api.letsencrypt.org/directory
    manual:
      key:
      cert:
    secret:
      name:
      key: tls.key
      crt: tls.crt
    hosts: []
singleuser:
  podNameTemplate:
  extraTolerations: []
  nodeSelector: {}
  extraNodeAffinity:
    required: []
    preferred: []
  extraPodAffinity:
    required: []
    preferred: []
  extraPodAntiAffinity:
    required: []
    preferred: []
  networkTools:
    image:
      name: my_harbor/jupyterhub/k8s-network-tools
      tag: "2.0.0"
      pullPolicy:
      pullSecrets: []
    resources: {}
  cloudMetadata:
    # blockWithIptables set to true will append a privileged initContainer that
    # uses iptables to block the sensitive metadata server at the provided ip.
    blockWithIptables: true
    ip: 169.254.169.254
  networkPolicy:
    enabled: true
    ingress: []
    egress: []
    egressAllowRules:
      cloudMetadataServer: false
      dnsPortsPrivateIPs: true
      nonPrivateIPs: true
      privateIPs: false
    interNamespaceAccessLabels: ignore
    allowedIngressPorts: []
  events: true
  extraAnnotations: {}
  extraLabels:
    hub.jupyter.org/network-access-hub: "true"
  extraFiles: {}
  extraEnv: {}
  lifecycleHooks: {}
  initContainers: []
  extraContainers: []
  allowPrivilegeEscalation: false
  uid: 1000
  fsGid: 100
  serviceAccountName:
  storage:
    type: dynamic
    extraLabels: {}
    extraVolumes: []
    extraVolumeMounts: []
    static:
      pvcName:
      subPath: "{username}"
    capacity: 10Gi
    homeMountPath: /home/jovyan
    dynamic:
      storageClass:
      pvcNameTemplate: claim-{username}{servername}
      volumeNameTemplate: volume-{username}{servername}
      storageAccessModes: [ReadWriteOnce]
  image:
    name: my_harbor/jupyterhub/k8s-singleuser-sample
    tag: "2.0.0"
    pullPolicy:
    pullSecrets: []
  startTimeout: 36000
  cpu:
    limit:
    guarantee:
  memory:
    limit:
    guarantee: 1G
  extraResource:
    limits: {}
    guarantees: {}
  cmd: jupyterhub-singleuser
  defaultUrl:
  extraPodConfig: {}
  profileList: []
scheduling:
  userScheduler:
    enabled: true
    revisionHistoryLimit:
    replicas: 2
    logLevel: 4
    # plugins are configured on the user-scheduler so that it scores nodes in a
    # way that packs user pods onto the busiest node. By doing this, we help
    # scale down more effectively. It isn't obvious how to enable/disable and
    # configure the scoring plugins to accomplish this.
    #
    # plugins ref: https://kubernetes.io/docs/reference/scheduling/config/#scheduling-plugins-1
    # migration ref: https://kubernetes.io/docs/reference/scheduling/config/#scheduler-configuration-migrations
    #
    plugins:
      score:
        # These scoring plugins are enabled by default according to
        # https://kubernetes.io/docs/reference/scheduling/config/#scheduling-plugins
        # as of 2022-02-22.
        #
        # Enabled with high priority:
        # - NodeAffinity
        # - InterPodAffinity
        # - NodeResourcesFit
        # - ImageLocality
        # Remain enabled with low default priority:
        # - TaintToleration
        # - PodTopologySpread
        # - VolumeBinding
        # Disabled for scoring:
        # - NodeResourcesBalancedAllocation
        #
        disabled:
          # We disable these plugins (with regards to scoring) so they don't
          # interfere with or complicate our use of NodeResourcesFit.
          - name: NodeResourcesBalancedAllocation
          # These plugins are disabled so they can be enabled again below with
          # a different weight without causing an error.
          - name: NodeAffinity
          - name: InterPodAffinity
          - name: NodeResourcesFit
          - name: ImageLocality
        enabled:
          - name: NodeAffinity
            weight: 14631
          - name: InterPodAffinity
            weight: 1331
          - name: NodeResourcesFit
            weight: 121
          - name: ImageLocality
            weight: 11
    pluginConfig:
      # Here we declare that we should optimize pods to fit based on a
      # MostAllocated strategy instead of the default LeastAllocated.
      - name: NodeResourcesFit
        args:
          scoringStrategy:
            resources:
              - name: cpu
                weight: 1
              - name: memory
                weight: 1
            type: MostAllocated
    containerSecurityContext:
      runAsUser: 65534 # nobody user
      runAsGroup: 65534 # nobody group
      allowPrivilegeEscalation: false
    image:
      # IMPORTANT: Bumping the minor version of this binary should go hand in
      # hand with an inspection of the user-scheduler's RBAC resources
      # that we have forked in
      # templates/scheduling/user-scheduler/rbac.yaml.
      #
      # Debugging advice:
      #
      # - Is the configuration of kube-scheduler broken in
      #   templates/scheduling/user-scheduler/configmap.yaml?
      #
      # - Is the kube-scheduler binary incompatible with the k8s api-server
      #   because it is too new or too old?
      #
      # - You can update the GitHub workflow that runs tests to
      #   include "deploy/user-scheduler" in the k8s namespace report
      #   and reduce the user-scheduler deployment's replicas to 1 in
      #   dev-config.yaml to get relevant logs from the user-scheduler
      #   pods. Inspect the "Kubernetes namespace report" action!
      #
      # - Typical failures are that kube-scheduler fails to look up
      #   resources via its "informers" and won't start trying to
      #   schedule pods until they succeed, which may require
      #   additional RBAC permissions or a k8s api-server that is
      #   aware of the resources.
      #
      # - If "successfully acquired lease" can be seen in the logs, it
      #   is a good sign that kube-scheduler is ready to schedule pods.
      #
      name: my_harbor/google_containers/kube-scheduler
      # tag is automatically bumped to new patch versions by the
      # watch-dependencies.yaml workflow. The minor version is pinned in the
      # workflow, and should be updated there if a minor version bump is done
      # here.
      #
      tag: "v1.23.10" # ref: https://github.com/kubernetes/website/blob/453ee631db5af9220a08d177d13a342d760b518f/content/en/releases/patch-releases.md
      #tag: "v1.22.3"
      pullPolicy:
      pullSecrets: []
    nodeSelector: {}
    tolerations: []
    labels: {}
    annotations: {}
    pdb:
      enabled: true
      maxUnavailable: 1
      minAvailable:
    resources: {}
    serviceAccount:
      create: true
      name:
      annotations: {}
    extraPodSpec: {}
  podPriority:
    enabled: false
    globalDefault: false
    defaultPriority: 0
    imagePullerPriority: -5
    userPlaceholderPriority: -10
  userPlaceholder:
    enabled: true
    image:
      name: my_harbor/google_containers/pause
      # tag is automatically bumped to new patch versions by the
      # watch-dependencies.yaml workflow.
      #
      # If you update this, also update prePuller.pause.image.tag
      #
      tag: "3.8"
      pullPolicy:
      pullSecrets: []
    revisionHistoryLimit:
    replicas: 0
    labels: {}
    annotations: {}
    containerSecurityContext:
      runAsUser: 65534 # nobody user
      runAsGroup: 65534 # nobody group
      allowPrivilegeEscalation: false
    resources: {}
  corePods:
    tolerations:
      - key: hub.jupyter.org/dedicated
        operator: Equal
        value: core
        effect: NoSchedule
      - key: hub.jupyter.org_dedicated
        operator: Equal
        value: core
        effect: NoSchedule
    nodeAffinity:
      matchNodePurpose: prefer
  userPods:
    tolerations:
      - key: hub.jupyter.org/dedicated
        operator: Equal
        value: user
        effect: NoSchedule
      - key: hub.jupyter.org_dedicated
        operator: Equal
        value: user
        effect: NoSchedule
    nodeAffinity:
      matchNodePurpose: prefer
prePuller:
  revisionHistoryLimit:
  labels: {}
  annotations: {}
  resources: {}
  containerSecurityContext:
    runAsUser: 65534 # nobody user
    runAsGroup: 65534 # nobody group
    allowPrivilegeEscalation: false
  extraTolerations: []
  # hook relates to the hook-image-awaiter Job and hook-image-puller DaemonSet
  hook:
    enabled: true
    pullOnlyOnChanges: true
    # image and the configuration below relates to the hook-image-awaiter Job
    image:
      name: my_harbor/jupyterhub/k8s-image-awaiter
      tag: "2.0.0"
      pullPolicy:
      pullSecrets: []
    containerSecurityContext:
      runAsUser: 65534 # nobody user
      runAsGroup: 65534 # nobody group
      allowPrivilegeEscalation: false
    podSchedulingWaitDuration: 10
    nodeSelector: {}
    tolerations: []
    resources: {}
    serviceAccount:
      create: true
      name:
      annotations: {}
  continuous:
    enabled: true
  pullProfileListImages: true
  extraImages: {}
  pause:
    containerSecurityContext:
      runAsUser: 65534 # nobody user
      runAsGroup: 65534 # nobody group
      allowPrivilegeEscalation: false
    image:
      name: my_harbor/google_containers/pause
      # tag is automatically bumped to new patch versions by the
      # watch-dependencies.yaml workflow.
      #
      # If you update this, also update scheduling.userPlaceholder.image.tag
      #
      tag: "3.8"
      pullPolicy:
      pullSecrets: []
ingress:
  enabled: false
  annotations: {}
  ingressClassName:
  hosts: []
  pathSuffix:
  pathType: Prefix
  tls: []
cull:
  enabled: true
  users: false # --cull-users
  adminUsers: true # --cull-admin-users
  removeNamedServers: false # --remove-named-servers
  timeout: 3600 # --timeout
  every: 600 # --cull-every
  concurrency: 10 # --concurrency
  maxAge: 0 # --max-age
debug:
  enabled: false
global:
  safeToShowValues: false
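
While investigating, the chart's own debug flag (set to false above) can also be flipped to get verbose hub logs during startup, for example in an override file passed to helm:

debug:
  enabled: true

This only raises log verbosity; it does not change the reflector's API timeouts themselves.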