I am using the latest version, 4.0.0.
config

```yaml
fullnameOverride: ""
nameOverride:
enabled:
custom: {}
imagePullSecret:
create: false
automaticReferenceInjection: true
registry:
username:
password:
email:
imagePullSecrets: []
hub:
revisionHistoryLimit:
config:
AzureAdOAuthenticator:
username_claim: unique_name
oauth_callback_url: http://localhost:8000/hub/oauth_callback
enable_auth_state: true
admin_users:
- testadmin
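# enable_auth_state keeps the Azure AD auth/claims data around after login; the
# group handling configured in hub.extraConfig below (auth_state_groups_key) reads
# the user's groups from that auth state.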
service:
type: ClusterIP
annotations: {}
ports:
nodePort:
appProtocol:
extraPorts: []
loadBalancerIP:
baseUrl: /
cookieSecret:
initContainers: []
nodeSelector: {}
tolerations: []
concurrentSpawnLimit: 64
consecutiveFailureLimit: 5
activeServerLimit:
deploymentStrategy:
type: Recreate
db:
type: sqlite-pvc
upgrade:
pvc:
annotations: {}
selector: {}
accessModes:
- ReadWriteOnce
storage: 1Gi
subPath:
storageClassName:
url:
password:
labels: {}
annotations: {}
command: []
args: []
extraConfig:
username_map: |
import os
from oauthenticator.azuread import AzureAdOAuthenticator
c.AzureAdOAuthenticator.manage_groups = True
c.AzureAdOAuthenticator.auth_state_groups_key = "user.groups"
c.AzureAdOAuthenticator.allowed_groups = {"grpA-int-admin", "grpB-int-admin"}
c.AzureAdOAuthenticator.admin_groups = {"grpA-int-user", "grpB-int-admin"}
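# As far as I understand the oauthenticator behaviour: with manage_groups enabled,
# the groups are taken from auth_state at the key given by auth_state_groups_key,
# allowed_groups then restricts who may log in, and admin_groups grants hub admin.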
class subAzureAuthenticator(AzureAdOAuthenticator):
def normalize_username(self, username):
"""Normalize the given username and return it
Override in subclasses if usernames need different normalization rules.
The default attempts to lowercase the username and apply `username_map` if it is
set.
"""
username = username.lower().split('@')[0]
clean_username = username.replace('.', '')
clean_username = clean_username.replace('_', '')
return clean_username
c.JupyterHub.authenticator_class = subAzureAuthenticator
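# Illustration only (not applied config): with the normalize_username override above,
# a login such as "John.Doe_X@contoso.com" is lowercased, cut at "@", and stripped of
# "." and "_", so JupyterHub stores the user as "johndoex".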
extraFiles:
profileListConfig:
mountPath: /usr/local/etc/jupyterhub/jupyterhub_config.d/profileList.py
mode: 0777
stringData: |
async def custom_options_form(spawner):
group_names = [group.name for group in spawner.user.groups]
print("----------------------------------------------")
print(group_names)
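# These two prints are what produce the "---" separator line and the group list
# that appear in the hub logs below.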
profile_configurations = {
'intA-group': [{
'display_name': 'Light',
'description': '2 CPU and 6 GB memory + VS-Code ',
'server_name': 'intA-group',
'kubespawner_override': {
'image': 'quay.io/jupyter/datascience-notebook:latest',
'namespace': 'intA-group',
'mem_limit': '6G',
'mem_guarantee': '6G',
'cpu_limit': 2
}
},{
'display_name': 'Medium',
'description': '6 CPU and 18 GB memory ',
'server_name': 'intA-group',
'kubespawner_override': {
'image': 'quay.io/jupyter/datascience-notebook:latest',
'namespace': 'intA-group',
'mem_limit': '18G',
'mem_guarantee': '18G',
'cpu_limit': 6
}
}],
'intB-group': [{
'display_name': 'Light',
'description': '2 CPU and 6 GB memory + VS-Code',
'server_name': 'intA-group',
'kubespawner_override': {
'image': 'quay.io/jupyter/datascience-notebook:latest',
'namespace': 'intA-group',
'mem_limit': '6G',
'mem_guarantee': '6G',
'cpu_limit': 2
}
},{
'display_name': 'Medium',
'description': '6 CPU and 18 GB memory ',
'server_name': 'intA-group',
'kubespawner_override': {
'image': 'quay.io/jupyter/datascience-notebook:latest',
'namespace': 'intA-group',
'mem_limit': '18G',
'mem_guarantee': '18G',
'cpu_limit': 6
}
}],
'default': [{
'display_name': 'v1.0',
'description': '2 CPU and 6 GB memory ',
'kubespawner_override': {
'image': 'quay.io/jupyter/datascience-notebook:latest',
'namespace': 'jupyterhub',
'mem_limit': '6G',
'mem_guarantee': '6G',
'cpu_limit': 2
}
}]
}
# Initialize an empty list for profile_list
profile_list = []
# Iterate through group names and map to profiles, with a default fallback
for group_name in group_names:
# Attempt to find an exact or approximate match in profile_configurations
matched_profiles = [
profile_configurations[key]
for key in profile_configurations
if group_name.lower() in key.lower()
]
# If matches are found, add them; otherwise, use the default profile
if matched_profiles:
for profiles in matched_profiles:
profile_list.extend(profiles)
else:
profile_list.extend(profile_configurations["default"])
spawner.profile_list = profile_list
spawner.lifecycle_hooks.update({
"postStart": {
"exec": {
"command": [
"sh",
"-c",
"if [ -d /home/jovyan/.ssh ]; then chmod 600 /home/jovyan/.ssh/id_rsa; chmod 600 /home/jovyan/.ssh/id_rsa.pub; fi"
]
}
}
})
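# The postStart hook above only tightens permissions on a mounted SSH key pair,
# since ssh refuses to use a private key that is readable by group/others.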
return spawner._options_form_default()
c.KubeSpawner.options_form = custom_options_form
c.ContentsManager.allow_hidden = True
c.FileContentsManager.allow_hidden = True
extraEnv:
OAUTH_CLIENT_ID:
valueFrom:
secretKeyRef:
name: azuread-auth-config
key: client_id
OAUTH_CLIENT_SECRET:
valueFrom:
secretKeyRef:
name: azuread-auth-config
key: client_secret
AAD_TENANT_ID:
valueFrom:
secretKeyRef:
name: azuread-auth-config
key: tenant_id
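# These are read from the azuread-auth-config Secret; OAuthenticator picks up
# OAUTH_CLIENT_ID / OAUTH_CLIENT_SECRET from the environment by default, and (as far
# as I can tell) AzureAdOAuthenticator takes its tenant_id default from AAD_TENANT_ID.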
extraContainers: []
extraVolumes: []
extraVolumeMounts: []
image:
name: quay.io/jupyterhub/k8s-hub
tag: "4.0.0"
pullPolicy:
pullSecrets: []
resources: {}
podSecurityContext:
runAsNonRoot: true
fsGroup: 1000
seccompProfile:
type: "RuntimeDefault"
containerSecurityContext:
runAsUser: 1000
runAsGroup: 1000
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
lifecycle: {}
# loadRoles:
# grpA-int-user-admin:
# description: admin for the intA-group users
# scopes:
# - 'read:users'
# - 'admin:users!group=grpA-int-user'
# - 'admin:groups!group=grpA-int-user'
# - 'admin-ui'
# users:
# - testuser
services: {}
pdb:
enabled: false
maxUnavailable:
minAvailable: 1
networkPolicy:
enabled: true
ingress: []
egress: []
egressAllowRules:
cloudMetadataServer: true
dnsPortsCloudMetadataServer: true
dnsPortsKubeSystemNamespace: true
dnsPortsPrivateIPs: true
nonPrivateIPs: true
privateIPs: true
interNamespaceAccessLabels: ignore
allowedIngressPorts: []
allowNamedServers: false
namedServerLimitPerUser:
authenticatePrometheus:
redirectToServer:
shutdownOnLogout:
templatePaths: []
templateVars: {}
livenessProbe:
# The livenessProbe's aim is to give JupyterHub sufficient time to start up, but
# to allow a restart if it becomes unresponsive for ~5 min.
enabled: true
initialDelaySeconds: 300
periodSeconds: 10
failureThreshold: 30
timeoutSeconds: 3
readinessProbe:
# The readinessProbe's aim is to provide a successful startup indication, but
# following that never to become unready before its livenessProbe fails and
# restarts it if needed. Becoming unready after startup serves no purpose, as
# there is no other pod to fall back to in our non-HA deployment.
enabled: true
initialDelaySeconds: 0
periodSeconds: 2
failureThreshold: 1000
timeoutSeconds: 1
existingSecret:
serviceAccount:
create: true
name:
annotations: {}
extraPodSpec: {}
rbac:
create: true
# proxy relates to the proxy pod, the proxy-public service, and the autohttps
# pod and proxy-http service.
proxy:
secretToken:
annotations: {}
deploymentStrategy:
type: Recreate
rollingUpdate:
service:
type: ClusterIP
labels: {}
annotations: {}
nodePorts:
http:
https:
disableHttpPort: false
extraPorts: []
loadBalancerIP:
loadBalancerSourceRanges: []
chp:
revisionHistoryLimit:
containerSecurityContext:
runAsNonRoot: true
runAsUser: 65534 # nobody user
runAsGroup: 65534 # nobody group
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
seccompProfile:
type: "RuntimeDefault"
image:
name: quay.io/jupyterhub/configurable-http-proxy
# tag is automatically bumped to new patch versions by the
# watch-dependencies.yaml workflow.
#
tag: "4.6.2" # https://github.com/jupyterhub/configurable-http-proxy/tags
pullPolicy:
pullSecrets: []
extraCommandLineFlags:
- --host-routing
livenessProbe:
enabled: true
initialDelaySeconds: 60
periodSeconds: 10
failureThreshold: 30
timeoutSeconds: 3
readinessProbe:
enabled: true
initialDelaySeconds: 0
periodSeconds: 2
failureThreshold: 1000
timeoutSeconds: 1
resources: {}
defaultTarget:
errorTarget:
extraEnv: {}
nodeSelector: {}
tolerations: []
networkPolicy:
enabled: true
ingress: []
egress: []
egressAllowRules:
cloudMetadataServer: true
dnsPortsCloudMetadataServer: true
dnsPortsKubeSystemNamespace: true
dnsPortsPrivateIPs: true
nonPrivateIPs: true
privateIPs: true
interNamespaceAccessLabels: ignore
allowedIngressPorts: [http, https]
pdb:
enabled: false
maxUnavailable:
minAvailable: 1
extraPodSpec: {}
traefik:
revisionHistoryLimit:
containerSecurityContext:
runAsNonRoot: true
runAsUser: 65534 # nobody user
runAsGroup: 65534 # nobody group
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
seccompProfile:
type: "RuntimeDefault"
image:
name: traefik
tag: "v3.2.0" # ref: https://hub.docker.com/_/traefik?tab=tags
pullPolicy:
pullSecrets: []
hsts:
includeSubdomains: true
preload: false
maxAge: 15724800 # About 6 months
resources: {}
labels: {}
extraInitContainers: []
extraEnv: {}
extraVolumes: []
extraVolumeMounts: []
extraStaticConfig: {}
extraDynamicConfig: {}
nodeSelector: {}
tolerations: []
extraPorts: []
networkPolicy:
enabled: true
ingress: []
egress: []
egressAllowRules:
cloudMetadataServer: true
dnsPortsCloudMetadataServer: true
dnsPortsKubeSystemNamespace: true
dnsPortsPrivateIPs: true
nonPrivateIPs: true
privateIPs: true
interNamespaceAccessLabels: ignore
allowedIngressPorts: [http, https]
pdb:
enabled: false
maxUnavailable:
minAvailable: 1
serviceAccount:
create: true
name:
annotations: {}
extraPodSpec: {}
secretSync:
containerSecurityContext:
runAsNonRoot: true
runAsUser: 65534 # nobody user
runAsGroup: 65534 # nobody group
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
seccompProfile:
type: "RuntimeDefault"
image:
name: quay.io/jupyterhub/k8s-secret-sync
tag: "4.0.0"
pullPolicy:
pullSecrets: []
resources: {}
labels: {}
https:
enabled: false
type: letsencrypt
#type: letsencrypt, manual, offload, secret
letsencrypt:
contactEmail:
# Specify custom server here (https://acme-staging-v02.api.letsencrypt.org/directory) to hit staging LE
acmeServer: https://acme-v02.api.letsencrypt.org/directory
manual:
key:
cert:
secret:
name:
key: tls.key
crt: tls.crt
hosts: []
singleuser:
podNameTemplate:
extraTolerations: []
nodeSelector: {}
extraNodeAffinity:
required: []
preferred: []
extraPodAffinity:
required: []
preferred: []
extraPodAntiAffinity:
required: []
preferred: []
networkTools:
image:
name: quay.io/jupyterhub/k8s-network-tools
tag: "4.0.0"
pullPolicy:
pullSecrets: []
resources: {}
cloudMetadata:
blockWithIptables: true
ip: 169.254.169.254
networkPolicy:
enabled: true
ingress: []
egress: []
egressAllowRules:
cloudMetadataServer: false
dnsPortsCloudMetadataServer: true
dnsPortsKubeSystemNamespace: true
dnsPortsPrivateIPs: true
nonPrivateIPs: true
privateIPs: false
interNamespaceAccessLabels: ignore
allowedIngressPorts: []
events: true
extraAnnotations: {}
extraLabels:
hub.jupyter.org/network-access-hub: "true"
extraFiles: {}
extraEnv: {}
lifecycleHooks: {}
initContainers: []
extraContainers: []
allowPrivilegeEscalation: false
uid: 1000
fsGid: 100
serviceAccountName:
storage:
type: dynamic
extraLabels: {}
extraVolumes: []
extraVolumeMounts: []
static:
pvcName:
subPath: "{username}"
capacity: 10Gi
homeMountPath: /home/jovyan
dynamic:
storageClass:
pvcNameTemplate: claim-{username}{servername}
volumeNameTemplate: volume-{username}{servername}
storageAccessModes: [ReadWriteOnce]
image:
name: quay.io/jupyterhub/k8s-singleuser-sample
tag: "4.0.0"
pullPolicy: IfNotPresent
pullSecrets: []
startTimeout: 300
cpu:
limit:
guarantee:
memory:
limit:
guarantee: 1G
extraResource:
limits: {}
guarantees: {}
cmd: jupyterhub-singleuser
defaultUrl:
extraPodConfig: {}
profileList: []
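# Left empty here: the profile list is instead built per user at spawn time by the
# custom_options_form hook in hub.extraFiles above.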
# scheduling relates to the user-scheduler pods and user-placeholder pods.
scheduling:
userScheduler:
enabled: true
revisionHistoryLimit:
replicas: 2
logLevel: 4
plugins:
score:
disabled:
- name: NodeResourcesBalancedAllocation
- name: NodeAffinity
- name: InterPodAffinity
- name: NodeResourcesFit
- name: ImageLocality
enabled:
- name: NodeAffinity
weight: 14631
- name: InterPodAffinity
weight: 1331
- name: NodeResourcesFit
weight: 121
- name: ImageLocality
weight: 11
pluginConfig:
- name: NodeResourcesFit
args:
scoringStrategy:
type: MostAllocated
resources:
- name: cpu
weight: 1
- name: memory
weight: 1
containerSecurityContext:
runAsNonRoot: true
runAsUser: 65534 # nobody user
runAsGroup: 65534 # nobody group
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
seccompProfile:
type: "RuntimeDefault"
image:
tag: "v1.30.6" # ref: https://github.com/kubernetes/kubernetes/tree/master/CHANGELOG
pullPolicy:
pullSecrets: []
nodeSelector: {}
tolerations: []
labels: {}
annotations: {}
pdb:
enabled: true
maxUnavailable: 1
minAvailable:
resources: {}
serviceAccount:
create: true
name:
annotations: {}
extraPodSpec: {}
podPriority:
enabled: false
globalDefault: false
defaultPriority: 0
imagePullerPriority: -5
userPlaceholderPriority: -10
userPlaceholder:
enabled: true
image:
name: registry.k8s.io/pause
# tag is automatically bumped to new patch versions by the
# watch-dependencies.yaml workflow.
#
# If you update this, also update prePuller.pause.image.tag
#
tag: "3.10"
pullPolicy:
pullSecrets: []
revisionHistoryLimit:
replicas: 0
labels: {}
annotations: {}
containerSecurityContext:
runAsNonRoot: true
runAsUser: 65534 # nobody user
runAsGroup: 65534 # nobody group
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
seccompProfile:
type: "RuntimeDefault"
resources: {}
corePods:
tolerations:
- key: hub.jupyter.org/dedicated
operator: Equal
value: core
effect: NoSchedule
- key: hub.jupyter.org_dedicated
operator: Equal
value: core
effect: NoSchedule
nodeAffinity:
matchNodePurpose: prefer
userPods:
tolerations:
- key: hub.jupyter.org/dedicated
operator: Equal
value: user
effect: NoSchedule
- key: hub.jupyter.org_dedicated
operator: Equal
value: user
effect: NoSchedule
nodeAffinity:
matchNodePurpose: prefer
# prePuller relates to the hook|continuous-image-puller DaemonSets
prePuller:
revisionHistoryLimit:
labels: {}
annotations: {}
resources: {}
containerSecurityContext:
runAsNonRoot: true
runAsUser: 65534 # nobody user
runAsGroup: 65534 # nobody group
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
seccompProfile:
type: "RuntimeDefault"
extraTolerations: []
# hook relates to the hook-image-awaiter Job and hook-image-puller DaemonSet
hook:
enabled: true
pullOnlyOnChanges: true
# image and the configuration below relates to the hook-image-awaiter Job
image:
name: quay.io/jupyterhub/k8s-image-awaiter
tag: "4.0.0"
pullPolicy:
pullSecrets: []
containerSecurityContext:
runAsNonRoot: true
runAsUser: 65534 # nobody user
runAsGroup: 65534 # nobody group
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
seccompProfile:
type: "RuntimeDefault"
podSchedulingWaitDuration: 10
nodeSelector: {}
tolerations: []
resources: {}
serviceAccount:
create: true
name:
annotations: {}
continuous:
enabled: true
pullProfileListImages: true
extraImages: {}
pause:
containerSecurityContext:
runAsNonRoot: true
runAsUser: 65534 # nobody user
runAsGroup: 65534 # nobody group
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
seccompProfile:
type: "RuntimeDefault"
image:
name: registry.k8s.io/pause
tag: "3.10"
pullPolicy:
pullSecrets: []
ingress:
enabled: false
annotations: {}
ingressClassName:
hosts: []
pathSuffix:
pathType: Prefix
tls: []
extraPaths: []
cull:
enabled: true
users: false # --cull-users
adminUsers: true # --cull-admin-users
removeNamedServers: false # --remove-named-servers
timeout: 3600 # --timeout
every: 600 # --cull-every
concurrency: 10 # --concurrency
maxAge: 0 # --max-age
debug:
enabled: false
global:
safeToShowValues: false
```
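To make the group-to-profile mapping easier to follow, here is a minimal standalone sketch of the matching loop from `custom_options_form`, run with made-up group names and with the profile entries shortened to plain labels:

```python
# Minimal sketch of the matching logic in custom_options_form (profiles shortened
# to labels, group names are made up for illustration).
profile_configurations = {
    "intA-group": ["A-Light", "A-Medium"],
    "intB-group": ["B-Light", "B-Medium"],
    "default": ["v1.0"],
}

def build_profile_list(group_names):
    profile_list = []
    for group_name in group_names:
        # case-insensitive substring match against the configuration keys
        matched_profiles = [
            profile_configurations[key]
            for key in profile_configurations
            if group_name.lower() in key.lower()
        ]
        if matched_profiles:
            for profiles in matched_profiles:
                profile_list.extend(profiles)
        else:
            profile_list.extend(profile_configurations["default"])
    return profile_list

print(build_profile_list(["intA"]))          # ['A-Light', 'A-Medium']
print(build_profile_list(["grpA-int-user"])) # no key contains this -> ['v1.0']
print(build_profile_list([]))                # empty groups -> [] (loop never runs)
```

Note that the "default" fallback is applied per group, so if `spawner.user.groups` is empty (as in the log output below) the loop never runs and the spawner ends up with an empty `profile_list`.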
logs

```
Loading /usr/local/etc/jupyterhub/secret/values.yaml
No config at /usr/local/etc/jupyterhub/existing-secret/values.yaml
Loading /usr/local/etc/jupyterhub/jupyterhub_config.d config: profileList.py
Loading extra config: username_map
[I 2024-12-09 17:55:22.576 JupyterHub app:3346] Running JupyterHub version 5.2.1
[I 2024-12-09 17:55:22.576 JupyterHub app:3376] Using Authenticator: builtins.subAzureAuthenticator
[I 2024-12-09 17:55:22.576 JupyterHub app:3376] Using Spawner: kubespawner.spawner.KubeSpawner-7.0.0
[I 2024-12-09 17:55:22.576 JupyterHub app:3376] Using Proxy: jupyterhub.proxy.ConfigurableHTTPProxy-5.2.1
[I 2024-12-09 17:55:22.664 JupyterHub app:2919] Creating service jupyterhub-idle-culler without oauth.
[I 2024-12-09 17:55:22.692 JupyterHub reflector:297] watching for pods with label selector='component=singleuser-server' in namespace jupyterhub
[I 2024-12-09 17:55:22.697 JupyterHub app:3053] testadmin still running
[I 2024-12-09 17:55:22.698 JupyterHub app:3416] Initialized 1 spawners in 0.020 seconds
[I 2024-12-09 17:55:22.700 JupyterHub metrics:373] Found 1 active users in the last ActiveUserPeriods.twenty_four_hours
[I 2024-12-09 17:55:22.701 JupyterHub metrics:373] Found 1 active users in the last ActiveUserPeriods.seven_days
[I 2024-12-09 17:55:22.701 JupyterHub metrics:373] Found 1 active users in the last ActiveUserPeriods.thirty_days
[I 2024-12-09 17:55:22.701 JupyterHub app:3703] Not starting proxy
[I 2024-12-09 17:55:22.704 JupyterHub app:3739] Hub API listening on http://:8081/hub/
[I 2024-12-09 17:55:22.704 JupyterHub app:3741] Private Hub API connect url http://hub:8081/hub/
[I 2024-12-09 17:55:22.704 JupyterHub app:3615] Starting managed service jupyterhub-idle-culler
[I 2024-12-09 17:55:22.704 JupyterHub service:423] Starting service 'jupyterhub-idle-culler': ['python3', '-m', 'jupyterhub_idle_culler', '--url=http://localhost:8081/hub/api', '--timeout=3600', '--cull-every=600', '--concurrency=10']
[I 2024-12-09 17:55:22.705 JupyterHub service:136] Spawning python3 -m jupyterhub_idle_culler --url=http://localhost:8081/hub/api --timeout=3600 --cull-every=600 --concurrency=10
[I 2024-12-09 17:55:22.706 JupyterHub app:3772] JupyterHub is now running, internal Hub API at http://hub:8081/hub/
[I 2024-12-09 17:55:22.836 JupyterHub log:192] 200 GET /hub/api/ (jupyterhub-idle-culler@::1) 4.54ms
[I 2024-12-09 17:55:22.847 JupyterHub log:192] 200 GET /hub/api/users?state=[secret] (jupyterhub-idle-culler@::1) 10.86ms
[I 2024-12-09 17:55:45.771 JupyterHub log:192] 200 GET /hub/home (testadmin@::ffff:127.0.0.1) 37.88ms
[I 2024-12-09 17:55:46.839 JupyterHub proxy:356] Removing user testadmin from proxy (/user/testadmin/)
[I 2024-12-09 17:55:46.841 JupyterHub spawner:3282] Deleting pod jupyterhub/jupyter-testadmin
[I 2024-12-09 17:55:48.020 JupyterHub base:1333] User testadmin server took 1.180 seconds to stop
[I 2024-12-09 17:55:48.020 JupyterHub log:192] 204 DELETE /hub/api/users/testadmin/server?_xsrf=[secret] (testadmin@::ffff:127.0.0.1) 1194.16ms
----------------------------------------------
[]
[I 2024-12-09 17:55:49.042 JupyterHub provider:661] Creating oauth client jupyterhub-user-testadmin
[I 2024-12-09 17:55:49.053 JupyterHub log:192] 302 GET /hub/spawn/testadmin -> /hub/spawn-pending/testadmin (testadmin@::ffff:127.0.0.1) 30.87ms
[I 2024-12-09 17:55:49.081 JupyterHub reflector:297] watching for events with field selector='involvedObject.kind=Pod' in namespace jupyterhub
[I 2024-12-09 17:55:49.083 JupyterHub spawner:2931] Attempting to create pvc claim-testadmin, with timeout 3
[I 2024-12-09 17:55:49.101 JupyterHub spawner:2947] PVC claim-testadmin already exists, so did not create new pvc.
[I 2024-12-09 17:55:49.115 JupyterHub spawner:2890] Attempting to create pod jupyter-testadmin, with timeout 3
[I 2024-12-09 17:55:49.116 JupyterHub pages:397] testadmin is pending spawn
[I 2024-12-09 17:55:49.120 JupyterHub log:192] 200 GET /hub/spawn-pending/testadmin (testadmin@::ffff:127.0.0.1) 19.60ms
[I 2024-12-09 17:55:51.568 JupyterHub log:192] 200 GET /hub/api (@10.42.0.46) 0.38ms
[I 2024-12-09 17:55:51.610 JupyterHub log:192] 200 POST /hub/api/users/testadmin/activity (testadmin@10.42.0.46) 8.12ms
[I 2024-12-09 17:55:54.611 JupyterHub base:1124] User testadmin took 5.584 seconds to start
[I 2024-12-09 17:55:54.612 JupyterHub proxy:331] Adding user testadmin to proxy /user/testadmin/ => http://10.42.0.46:8888
[I 2024-12-09 17:55:54.617 JupyterHub users:899] Server testadmin is ready
```
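The separator line and the empty list in the middle of the log come from the `print()` calls in `custom_options_form`, which suggests `spawner.user.groups` was empty for testadmin at spawn time. A quick way to check what groups the Hub itself has recorded for a user is the JupyterHub REST API; the sketch below assumes an API token with permission to read users and uses the in-cluster Hub URL from the log (both are placeholders to adjust for your deployment):

```python
# Sketch: query the Hub REST API for a user's recorded groups.
import requests

HUB_API = "http://hub:8081/hub/api"           # placeholder; a port-forwarded URL works too
TOKEN = "REPLACE_WITH_AN_ADMIN_API_TOKEN"     # placeholder

resp = requests.get(
    f"{HUB_API}/users/testadmin",
    headers={"Authorization": f"token {TOKEN}"},
)
resp.raise_for_status()
print(resp.json().get("groups"))  # should list the Azure AD groups if manage_groups populated them
```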