# runtime-parameters.yaml
---
  1. operator:
  2. enabled: true
  3. # Necessary to avoid conflicts in parallel deployments
  4. watchAllNamespaces: false
  5. containerPorts:
  6. metrics: 8100
  7. service:
  8. ports:
  9. metrics: 8104
  10. serviceAccount:
  11. create: true
  12. automountServiceAccountToken: true
  13. containerSecurityContext:
  14. enabled: true
  15. runAsUser: 1002
  16. runAsGroup: 1002
  17. runAsNonRoot: true
  18. readOnlyRootFilesystem: true
  19. allowPrivilegeEscalation: false
  20. capabilities:
  21. drop: ["ALL"]
  22. podSecurityContext:
  23. enabled: true
  24. fsGroup: 1002
  25. seccompProfile:
  26. type: RuntimeDefault
  27. metrics:
  28. enabled: true
  29. apiserver:
  30. enabled: true
  31. # Necessary to avoid conflicts in parallel deployments
  32. watchAllNamespaces: false
  33. service:
  34. ports:
  35. http: 8300
  36. grpc: 8302
  37. cluster:
  38. enabled: true
  39. service:
  40. type: ClusterIP
  41. worker:
  42. groupSpecs:
  43. - groupName: vib
  44. head:
  45. resourcesPreset: xlarge
  46. extraDeploy:
  47. # HACK: Kuberay does not allow setting a different service port from the container port, so we cannot set it at
  48. # 80 because of the permissions. Instead, we create a mock service that points to the cluster head at port 80
  49. - apiVersion: v1
  50. kind: Service
  51. metadata:
  52. name: kuberay-cluster-head-vib-svc
  53. labels:
  54. app.kubernetes.io/component: cluster-head
  55. spec:
  56. type: LoadBalancer
  57. selector:
  58. app.kubernetes.io/component: cluster-head
  59. ports:
  60. - port: 80
  61. name: http
  62. targetPort: 8265
  63. # HACK: The kuberay operator directly crates pod objects (not deployment or statefulsets) when creating RayCluster
  64. # objects. Therefore we cannot run goss tests on the head node. As an alternative, we will create a job that submits
  65. # a job to the RayCluster and we will check it in the Cypress tests
  66. - apiVersion: batch/v1
  67. kind: Job
  68. metadata:
  69. name: kuberay-vib-job
  70. spec:
  71. template:
  72. spec:
  73. restartPolicy: OnFailure
  74. containers:
  75. - name: job-runner
  76. image: '{{ include "kuberay.ray.image" . }}'
  77. securityContext:
  78. runAsNonRoot: true
  79. privileged: false
  80. readOnlyRootFilesystem: true
  81. allowPrivilegeEscalation: false
  82. capabilities:
  83. drop: ["ALL"]
  84. seccompProfile:
  85. type: "RuntimeDefault"
  86. command:
  87. - /bin/bash
  88. args:
  89. - -ec
  90. - |
  91. #!/bin/bash
  92. set -o errexit
  93. set -o nounset
  94. set -o pipefail
  95. . /opt/bitnami/scripts/libos.sh
  96. # Set the endpoint URL
  97. host="kuberay-cluster-head-svc"
  98. port="8265"
  99. kuberay_head_ready() {
  100. # Test the TCP connection with a timeout
  101. if timeout 5 bash -c "</dev/tcp/$host/$port"; then
  102. return 0
  103. else
  104. return 1
  105. fi
  106. }
  107. echo "0" > /tmp/ready
  108. info "Waiting for the head instance"
  109. if ! retry_while "kuberay_head_ready" 12 30; then
  110. error "Could not connect to the head instance"
  111. exit 1
  112. else
  113. info "Head ready! Running job"
  114. ray job submit --address http://${host}:${port} -- python -c "import ray; ray.init(); print(ray.cluster_resources())"
  115. fi
  116. volumeMounts:
  117. - name: tmp
  118. mountPath: /tmp
  119. volumes:
  120. - name: tmp
  121. emptyDir: {}