Bug description
We were running jupyterhub 0.8.1 with the network type set to "host" in jupyterhub_config.py, and docker images spawned properly. But after updating jupyterhub to 1.3.0 with the same network type "host" in jupyterhub_config.py, we are no longer able to spawn docker images.
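For reference, these are the relevant host-networking lines from our jupyterhub_config.py (the full file is included under Configuration below):

```python
# excerpt from our jupyterhub_config.py (full file under Configuration below)
c.JupyterHub.spawner_class = 'dockerspawner.DockerSpawner'
c.DockerSpawner.extra_host_config = {'network_mode': 'host'}
c.DockerSpawner.use_internal_ip = True
c.DockerSpawner.network_name = 'host'
```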
Expected behaviour
Docker images should spawn without issues with network type "host" on jupyterhub version 1.3.0.
Let us know if any configuration needs to be corrected to make docker images spawn properly in jupyterhub.
Actual behaviour
Docker images fail to spawn with network type "host" on jupyterhub version 1.3.0.
**Exception faced when the network type is "host":** The 'ip' trait of a Server instance expected a unicode string, not the NoneType None.
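As far as we can tell this is a plain traitlets validation error: somewhere during spawn a Server object is created with ip=None. A minimal sketch of our understanding (our assumption, not the actual hub code path):

```python
# Minimal sketch (assumption): constructing a jupyterhub Server with ip=None
# raises the same TraitError we see, which suggests the spawner reports no
# IP when the container runs with network_mode "host".
from jupyterhub.objects import Server

Server(ip=None)
# traitlets.traitlets.TraitError: The 'ip' trait of a Server instance
# expected a unicode string, not the NoneType None.
```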
How to reproduce
1. Install jupyterhub version 1.3.0 with network type "host" and a docker image with a notebook kernel.
2. Start the jupyter notebook server.
3. Try to spawn the image.
4. The spawn fails with the following error:

Error: HTTP 500: Internal Server Error (Error in Authenticator.pre_spawn_start: TraitError The 'ip' trait of a Server instance expected a unicode string, not the NoneType None.)
When I change the network type to "bridge" in jupyterhub_config.py, images spawn successfully. But we need network type "host", because creating Spark sessions inside the docker images depends on the host network.
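Our assumption is that with network_mode "host" the container shares the host's network namespace, so docker inspect reports no container IP for the spawner to use. Below is an untested sketch of the kind of override we could try as a workaround; the HostNetworkSpawner name and the socket lookup are our own guesses, not a documented fix:

```python
# Untested workaround sketch: under network_mode "host" the container has no
# dedicated IP, so report the host's IP and the port assigned in our
# pre_spawn_hook instead of inspecting the container.
import socket

from dockerspawner import DockerSpawner

class HostNetworkSpawner(DockerSpawner):  # hypothetical subclass
    async def get_ip_and_port(self):
        host_ip = socket.gethostbyname(socket.gethostname())
        return host_ip, self.port

c.JupyterHub.spawner_class = HostNetworkSpawner
```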
Your personal set up
Attaching our jupyterhub_config.py (pasted under Configuration below):
- OS:
- Version(s):
Full environment
# paste output of `pip freeze` or `conda list` here
Configuration
# jupyterhub_config.py
# Configuration file for jupyterhub.
import os
import socket

c.PAMAuthenticator.open_sessions = False
c.JupyterHub.pid_file = '/folder1/miniconda3/envs/jupyterhub/etc/jupyterhub.pid'
c.JupyterHub.logo_file = '/folder1/miniconda3/envs/jupyterhub/share/jupyter/hub/static/images/sampleimage_logo.png'
os.environ['OTDS_URL'] = 'http://inhyd-mag211-otds-launchpad.otxlab.net:8080'
c.JupyterHub.base_url = '/'
c.LocalOTDSOAuthenticator.login_service = 'folder1 Directory Service'
c.JupyterHub.cookie_secret_file = '/folder1/miniconda3/envs/jupyterhub/etc/jupyterhub_cookie_secret'
c.JupyterHub.db_url = '/folder1/miniconda3/envs/jupyterhub/etc/jupyterhub.sqlite'
# OTDS integration via oauthenticator
c.JupyterHub.authenticator_class = 'oauthenticator.LocalOTDSOAuthenticator'
c.LocalOTDSOAuthenticator.client_id = 'notebook_175'
c.LocalOTDSOAuthenticator.client_secret = 'vomSAHty68e98Y6FO2iOqVVz33t8odY7'
c.LocalOTDSOAuthenticator.username_key = 'name'
c.LocalOTDSOAuthenticator.callback_logout_url = 'http://10.96.94.175:8000/hub/login'
c.LocalOTDSOAuthenticator.oauth_callback_url = 'http://10.96.94.175:8000/hub/oauth_callback'
c.LocalOTDSOAuthenticator.resource_id = "231ecd3a-40b6-4d8d-a5f0-c8d85b5f2993"
c.LocalOTDSOAuthenticator.resource_name = "m4_demo_notebook"
c.Authenticator.admin_users = {"sampleimage"}
# note: proxy_api_ip is deprecated since JupyterHub 0.8 in favour of
# c.ConfigurableHTTPProxy.api_url
c.JupyterHub.proxy_api_ip = '0.0.0.0'
c.JupyterHub.hub_port = 8090
notebook_dir = '/home/jupyter/work'
c.DockerSpawner.notebook_dir = notebook_dir
c.JupyterHub.log_level = 'DEBUG'
# Enable debug-logging of the single-user server
c.Spawner.debug = True
# Disable debug-logging of the LocalProcessSpawner
c.LocalProcessSpawner.debug = False
# pass the maprticket file name here
MAPR_TICKET_FILE_PATH = ''
maprticket = "NO FILE FOUND"
if os.path.isfile(MAPR_TICKET_FILE_PATH) and os.access(MAPR_TICKET_FILE_PATH, os.R_OK):
    with open(MAPR_TICKET_FILE_PATH, "r") as f:
        maprticket = f.read()
else:
    print("Either the file is missing or not readable")
# in prod environment bda and spark master url are set in Ambari service
c.JupyterHub.spawner_class = 'dockerspawner.DockerSpawner'
c.DockerSpawner.environment = {
    'ELECTRON_URL': 'http://10.96.209.56:8110/',
    'PUBLISH_MODEL': 'https://10.96.94.175:8081/publish/modelPublish',
    'CLIENT_ID': 'notebook_client_175',
    'CLIENT_SECRET': 'vomSAHty68e98Y6FO2iOqVVz33t8odY7',
    'SPARK_HOST': '10.96.94.156',
    'RESOURCE_MANAGER_ADDRESS': '10.96.94.156:8032',
    'HISTORY_LOG_DIRECTORY': 'hdfs://clouderamaster631.lab.folder1.com:8020/user/spark/applicationHistory',
    'HISTORY_SERVER_ADDRESS': '10.96.94.156:18088',
    'PYSPARK_PYTHON': '/opt/miniconda2/envs/python3/bin/python',
    'DRIVER_MEMORY': '4g',
    'MAPR_TICKET_FILE': maprticket,
    'RESOURCE_MANAGER_HOST': '{10.96.94.156}',
    'GIT_USER_REPO': notebook_dir,
    'DOMAIN_NAME': 'folder1.com',
    'LIVY_SERVER': 'http://10.96.94.89:8998/',
    'SPARKMAGIC_IGNORE_SSL_ERRORS': 'false',
    'PUBLISH_TO_RESTAPI': 'true',
    # include here any custom spark parameters, e.g. --conf spark.executor.instances=4
    'EXTRA_SPARK_ARGS': '--num-executors 40 --executor-cores 2 --executor-memory 2GB --conf spark.dynamicAllocation.minExecutors=1 --conf spark.dynamicAllocation.maxExecutors=40 --conf spark.dynamicAllocation.enabled=true --conf spark.shuffle.service.enabled=true --conf spark.executor.instances=2',
}
c.DockerSpawner.volumes = {'/folder1/notebooks/{username}': notebook_dir, }
c.DockerSpawner.remove_containers = True
# host networking: this combination spawned fine on 0.8.1 but fails on 1.3.0
c.DockerSpawner.extra_host_config = {'network_mode': 'host'}
c.DockerSpawner.use_internal_ip = True
c.DockerSpawner.network_name = 'host'
DISTRIBUTION = 'DISTRIBUTION_NAME'
if DISTRIBUTION == 'mapr':
    c.newDockerSpawner.images = {'images': [
        {'name': 'sampleimage-notebook-base', 'description': 'Python 3'},
        {'name': 'sampleimage-notebook', 'description': 'Spark kernels (Pyspark, Scala, SparkSQL)'},
        {'name': 'sampleimage-notebook-tensorflow', 'description': 'TensorFlow 2.5.0'},
        {'name': 'sampleimage-notebook-pytorch', 'description': 'PyTorch 1.5'},
    ]}
else:
    c.newDockerSpawner.images = {'images': [
        {'name': 'sampleimage-notebook-base', 'description': 'Python 3'},
        {'name': 'sampleimage-notebook', 'description': 'Spark kernels (Pyspark, Scala, SparkSQL)'},
        {'name': 'sampleimage-notebook-tensorflow', 'description': 'TensorFlow 2.5.0'},
        {'name': 'sampleimage-notebook-pytorch', 'description': 'PyTorch 1.5'},
        {'name': 'sampleimage-notebook-sparkmagic', 'description': 'Spark Magic'},
    ]}
c.newDockerSpawner.memoryLimit = "4G"
c.newDockerSpawner.cpuLimit = "2"
# newDockerSpawner is our custom spawner; this overrides the spawner_class set above
c.JupyterHub.spawner_class = 'dockerspawner.newDockerSpawner'
# advertise the hub on the docker0 bridge address so containers can reach it
import netifaces

docker0 = netifaces.ifaddresses('docker0')
docker0_ipv4 = docker0[netifaces.AF_INET][0]
c.JupyterHub.hub_ip = docker0_ipv4['addr']
from jupyterhub.utils import random_port
import subprocess  # only used by the commented-out "git init" call below
NOTEBOOK_SERVICE_UID = 1000
def create_dir_hook(spawner):
    spawner.environment['NB_USER'] = 'jupyter'  # default system user
    spawned_user = spawner.user.name  # the spawned user
    spawner.environment['ACCESS_TOKEN'] = os.getenv(spawned_user + "_accesstoken")
    spawner.environment['REFRESH_TOKEN'] = os.getenv(spawned_user + "_refreshtoken")
    spawner.environment['OTDS_URL'] = os.getenv("OTDS_URL")
    submit_job_as_single_user = 'True'
    if submit_job_as_single_user.lower() == 'false':
        if spawner.image in ('sampleimage-notebook', 'sampleimage-notebook-sparkmagic'):
            spawner.environment['NB_USER'] = spawned_user  # run as the spawned user
    volume_path = os.path.join('/folder1/notebooks', spawned_user)
    uid = NOTEBOOK_SERVICE_UID
    spawner.port = random_port()
    if not os.path.exists(volume_path):
        # create the directory with mode 0755:
        # hub and container user must have the same UID for it to be writable,
        # while staying readable by other users on the system
        os.makedirs(volume_path, 0o755)
        # subprocess.Popen("git init", shell=True, cwd=volume_path).communicate()
    # the user folder should be owned by the user configured in the docker
    # container; if not, the end user will not be able to create any notebooks
    os.chown(volume_path, uid, uid)

# attach the hook function to the spawner
c.Spawner.pre_spawn_hook = create_dir_hook
c.DockerSpawner.debug = True
Logs
Unhandled error starting mnbuser's server: The 'ip' trait of a Server instance expected a unicode string, not the NoneType None.