chore: remove very outdated kubernetes configs (#19731)

* cleanup: remove very outdated kubernetes configs

* chore: remove INSTALLATION.md that references k8s manifests

* chore: remove kubernetes/ from .prettierignore
James Westbrook 2025-12-04 12:59:30 -07:00 committed by GitHub
parent 01284b92db
commit 2f68ac33b3
13 changed files with 0 additions and 216 deletions

.prettierignore

@@ -3,8 +3,6 @@
 pnpm-lock.yaml
 package-lock.json
 yarn.lock
-kubernetes/
-
 # Copy of .gitignore
 .DS_Store
 node_modules

INSTALLATION.md

@@ -1,35 +0,0 @@
### Installing Both Ollama and Open WebUI Using Kustomize
For a CPU-only pod:
```bash
kubectl apply -f ./kubernetes/manifest/base
```
For a GPU-enabled pod:
```bash
kubectl apply -k ./kubernetes/manifest
```
### Installing Both Ollama and Open WebUI Using Helm
Package the Helm chart first:
```bash
helm package ./kubernetes/helm/
```
For a CPU-only pod:
```bash
helm install ollama-webui ./ollama-webui-*.tgz
```
For a GPU-enabled pod:
```bash
helm install ollama-webui ./ollama-webui-*.tgz --set ollama.resources.limits.nvidia.com/gpu="1"
```
Check the `kubernetes/helm/values.yaml` file to see which parameters are available for customization.
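
One caveat with the GPU install command above: Helm's `--set` treats dots as key-path separators, so `nvidia.com/gpu` would be parsed as nested maps rather than as a single resource name. A minimal sketch of the safer spelling, assuming the chart passes these values straight through to the StatefulSet's resource limits:
```bash
# Backslash-escape the dots so --set-string reads "nvidia\.com/gpu"
# as one literal key (and keeps the value a string, as Kubernetes expects):
helm install ollama-webui ./ollama-webui-*.tgz \
  --set-string 'ollama.resources.limits.nvidia\.com/gpu=1'
```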

kubernetes/helm/README.md

@@ -1,4 +0,0 @@
# Helm Charts
Open WebUI Helm Charts are now hosted in a separate repo, which can be found here: https://github.com/open-webui/helm-charts
The charts are released at https://helm.openwebui.com.
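
Assuming the published repository follows the standard Helm index layout (not stated in this stub), consuming the relocated charts would look like:
```bash
# Register the hosted chart repository and pull the chart index:
helm repo add open-webui https://helm.openwebui.com
helm repo update
```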

kubernetes/manifest/base/kustomization.yaml

@@ -1,8 +0,0 @@
resources:
  - open-webui.yaml
  - ollama-service.yaml
  - ollama-statefulset.yaml
  - webui-deployment.yaml
  - webui-service.yaml
  - webui-ingress.yaml
  - webui-pvc.yaml
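
This kustomization is what makes the directory consumable by kustomize; the `-k` flag is the kustomize-aware counterpart of the `apply -f` command quoted in INSTALLATION.md above:
```bash
# Build the base (CPU-only) variant through kustomize and apply it:
kubectl apply -k ./kubernetes/manifest/base
```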

kubernetes/manifest/base/ollama-service.yaml

@@ -1,12 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: ollama-service
  namespace: open-webui
spec:
  selector:
    app: ollama
  ports:
    - protocol: TCP
      port: 11434
      targetPort: 11434
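
This Service gives Ollama a stable in-cluster DNS name, which the WebUI Deployment below consumes through `OLLAMA_BASE_URL`. A quick smoke test from a throwaway pod (the pod name `curl-probe` is illustrative):
```bash
# Ollama answers "Ollama is running" on its root path:
kubectl run curl-probe -n open-webui --rm -it --restart=Never \
  --image=curlimages/curl -- \
  curl -s http://ollama-service.open-webui.svc.cluster.local:11434/
```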

kubernetes/manifest/base/ollama-statefulset.yaml

@@ -1,41 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: ollama
  namespace: open-webui
spec:
  serviceName: "ollama"
  replicas: 1
  selector:
    matchLabels:
      app: ollama
  template:
    metadata:
      labels:
        app: ollama
    spec:
      containers:
        - name: ollama
          image: ollama/ollama:latest
          ports:
            - containerPort: 11434
          resources:
            requests:
              cpu: "2000m"
              memory: "2Gi"
            limits:
              cpu: "4000m"
              memory: "4Gi"
              nvidia.com/gpu: "0"
          volumeMounts:
            - name: ollama-volume
              mountPath: /root/.ollama
          tty: true
  volumeClaimTemplates:
    - metadata:
        name: ollama-volume
      spec:
        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: 30Gi
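
Two things worth noting here. `volumeClaimTemplates` mints one PVC per replica, named `<template>-<statefulset>-<ordinal>`. And `serviceName: "ollama"` points at a headless Service that none of these manifests actually defines (the only Service is `ollama-service`), so per-pod DNS would not resolve — one of the rough edges that dated these configs.
```bash
# With one replica, the claim created from the template is:
kubectl get pvc -n open-webui ollama-volume-ollama-0
```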

kubernetes/manifest/base/open-webui.yaml

@@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  name: open-webui

kubernetes/manifest/base/webui-deployment.yaml

@@ -1,38 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: open-webui-deployment
  namespace: open-webui
spec:
  replicas: 1
  selector:
    matchLabels:
      app: open-webui
  template:
    metadata:
      labels:
        app: open-webui
    spec:
      containers:
        - name: open-webui
          image: ghcr.io/open-webui/open-webui:main
          ports:
            - containerPort: 8080
          resources:
            requests:
              cpu: "500m"
              memory: "500Mi"
            limits:
              cpu: "1000m"
              memory: "1Gi"
          env:
            - name: OLLAMA_BASE_URL
              value: "http://ollama-service.open-webui.svc.cluster.local:11434"
          tty: true
          volumeMounts:
            - name: webui-volume
              mountPath: /app/backend/data
      volumes:
        - name: webui-volume
          persistentVolumeClaim:
            claimName: open-webui-pvc
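
Since both workloads live in the `open-webui` namespace, the fully qualified Service name is not strictly required; a sketch of the equivalent short form, resolved through the pod's DNS search list:
```yaml
env:
  - name: OLLAMA_BASE_URL
    # Same-namespace shorthand for ollama-service.open-webui.svc.cluster.local:
    value: "http://ollama-service:11434"
```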

kubernetes/manifest/base/webui-ingress.yaml

@@ -1,20 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: open-webui-ingress
  namespace: open-webui
  #annotations:
    # Use appropriate annotations for your Ingress controller, e.g., for NGINX:
    # nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  rules:
    - host: open-webui.minikube.local
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: open-webui-service
                port:
                  number: 8080
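
The host `open-webui.minikube.local` only resolves if mapped manually. A sketch for a minikube cluster, assuming its bundled NGINX ingress addon:
```bash
# Run an ingress controller, then point the test hostname at the node IP:
minikube addons enable ingress
echo "$(minikube ip)  open-webui.minikube.local" | sudo tee -a /etc/hosts
```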

kubernetes/manifest/base/webui-pvc.yaml

@@ -1,12 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  labels:
    app: open-webui
  name: open-webui-pvc
  namespace: open-webui
spec:
  accessModes: ["ReadWriteOnce"]
  resources:
    requests:
      storage: 2Gi
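
No `storageClassName` is set, so the claim binds through the cluster's default StorageClass (on minikube, the hostpath-backed `standard` class):
```bash
# Confirm which class is marked (default), then check that the claim bound:
kubectl get storageclass
kubectl get pvc open-webui-pvc -n open-webui
```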

kubernetes/manifest/base/webui-service.yaml

@@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: open-webui-service
  namespace: open-webui
spec:
  type: NodePort # Use LoadBalancer if you're on a cloud that supports it
  selector:
    app: open-webui
  ports:
    - protocol: TCP
      port: 8080
      targetPort: 8080
      # If using NodePort, you can optionally specify the nodePort:
      # nodePort: 30000
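
With `type: NodePort` and no `nodePort` pinned, Kubernetes picks a port in the 30000-32767 range on every node; two ways to find the resulting URL on minikube:
```bash
# Print the externally reachable URL for the NodePort service:
minikube service open-webui-service -n open-webui --url
# Or read the assigned port directly:
kubectl get svc open-webui-service -n open-webui \
  -o jsonpath='{.spec.ports[0].nodePort}'
```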

kubernetes/manifest/gpu/kustomization.yaml

@@ -1,8 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - ../base
patches:
  - path: ollama-statefulset-gpu.yaml

kubernetes/manifest/gpu/ollama-statefulset-gpu.yaml

@@ -1,17 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: ollama
  namespace: open-webui
spec:
  selector:
    matchLabels:
      app: ollama
  serviceName: "ollama"
  template:
    spec:
      containers:
        - name: ollama
          resources:
            limits:
              nvidia.com/gpu: "1"
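
Kustomize applies this as a strategic-merge patch: entries under `containers` are matched by `name`, so only the `nvidia.com/gpu` limit is overridden and everything else comes from the base StatefulSet. To preview the merged output without applying it (path as referenced by the INSTALLATION.md above):
```bash
# Render the patched manifests to stdout:
kubectl kustomize ./kubernetes/manifest
```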