@@ -0,0 +1,87 @@
# OpenShift version
## Installation
### Create a project
    oc new-project unifi-server
#### Allow pods to run as root
    oc adm policy add-scc-to-user anyuid -z default
### Add the app from Gitea
    oc new-app
Given the way the directory structure is laid out, a Python build should be detected automatically (a sketch of the full command is shown below).
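The bare `oc new-app` above still needs the source location. As a hedged sketch (the Gitea host and repository path are placeholders, not taken from this repo), the call could look like this:

```bash
# Assumption: the Gitea host and repository path are illustrative placeholders.
oc new-app https://gitea.example.local/reymota/jugaralpadel.git \
  --name=jugaralpadel --strategy=source
# --strategy=source forces an S2I build; the Python builder is picked up
# automatically when requirements.txt or setup.py is present.
```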
## Assigning the volumes
### If the PVC is not created yet
    oc set volume deployment.apps/jugaralpadel --add -t pvc --claim-size=300M --name=jugaralpadel-eventos-migrations --claim-name='jugaralpadel-eventos-migrations' --mount-path='/app/eventos/migrations' --claim-class='lvms-vg1'
    oc set volume deployment.apps/jugaralpadel --add -t pvc --claim-size=300M --name=jugaralpadel-media --claim-name='jugaralpadel-media' --mount-path='/app/mediafiles' --claim-class='lvms-vg1'
    oc set volume deployment.apps/jugaralpadel --add -t pvc --claim-size=300M --name=reymota-reymotausers-migrations --claim-name='reymota-reymotausers-migrations' --mount-path='/app/reymotausers/migrations' --claim-class='lvms-vg1'
    oc set volume deployment.apps/jugaralpadel --add -t pvc --claim-size=50G --name=static-volume --claim-name='static-volume' --mount-path='/app/staticfiles' --claim-class='lvms-vg1'
### If the PVC already exists
    oc set volume deployment.apps/jugaralpadel --add -t pvc --name=jugaralpadel-eventos-migrations --claim-name='jugaralpadel-eventos-migrations' --mount-path='/app/eventos/migrations'
    oc set volume deployment.apps/jugaralpadel --add -t pvc --name=jugaralpadel-media --claim-name='jugaralpadel-media' --mount-path='/app/mediafiles'
    oc set volume deployment.apps/jugaralpadel --add -t pvc --name=reymota-reymotausers-migrations --claim-name='reymota-reymotausers-migrations' --mount-path='/app/reymotausers/migrations'
    oc set volume deployment.apps/jugaralpadel --add -t pvc --name=static-volume --claim-name='static-volume' --mount-path='/app/staticfiles'
## Expose the service
    oc expose service/jugaralpadel
## PostgreSQL
The YAML files are in the Yamls directory.
The deployment and the service are created with the creaDB.sh shell script.
They are deleted with borraDB.sh.
Keep in mind that the PVC is *not* created by these scripts; it has to be created by hand (see the sketch below). This is done so the data is not lost.
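A minimal sketch of creating that PVC by hand in the current project; the claim name and size are illustrative and should match whatever the manifests in Yamls/ actually reference:

```bash
# Assumption: claim name, size and storage class are placeholders; align them with Yamls/.
oc apply -f - <<'EOF'
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: postgresql-data
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
  storageClassName: lvms-vg1
EOF
```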
## Things to do the first time
From inside the pod's shell (one way to open it is sketched below):
    python manage.py createsuperuser
    python manage.py makemigrations
    python manage.py migrate
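One way to open that shell, assuming the `jugaralpadel` deployment created earlier:

```bash
# Open an interactive shell in the running pod of the deployment
oc rsh deployment/jugaralpadel
# then run the manage.py commands above (the mount paths suggest the app lives in /app)
```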
## Changing the id sequences
    ALTER SEQUENCE tablename_id_seq RESTART WITH nn;
This is done after restoring a database dump onto a fresh installation. If ids have already been assigned, the sequence has to be restarted from the last one used (see the sketch below).
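A hedged way to avoid working out `nn` by hand is to set each sequence from the highest id already present. The deployment, role, database and table names below are placeholders:

```bash
# Assumption: deployment, role, database and table names are illustrative.
oc rsh deployment/postgresql \
  psql -U jugaralpadel -d jugaralpadel \
  -c "SELECT setval('eventos_evento_id_seq', (SELECT COALESCE(MAX(id), 1) FROM eventos_evento));"
```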
## GIT
To avoid retyping credentials:
    git config credential.helper store
Then do a pull, push, or whatever operation prompts for the username and password; enter them once and they will not be asked for again.
## PVCs and paths
    volumeMounts:
    - mountPath: /app/mediafiles
      name: jugaralpadel-media
    - mountPath: /app/eventos/migrations
      name: jugaralpadel-eventos-migrations
    - mountPath: /app/reymotausers/migrations
      name: reymota-reymotausers-migrations
    - mountPath: /app/staticfiles
      name: static-volume
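To check that the claims and mounts ended up as intended:

```bash
# List the PVCs in the current project and the volumes attached to the deployment
oc get pvc
oc set volume deployment/jugaralpadel
```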
@@ -0,0 +1,95 @@
---
apiVersion: v1
kind: Template
labels:
  template: hello
message: "A new hello app has been created"
metadata:
  annotations:
    description: This is an example of an application template in OpenShift 3
    iconClass: default
    tags: hello, world
  name: hello-world-template
  namespace: unifi-server
objects:
- apiVersion: v1
  kind: Service
  metadata:
    name: hello-world-service
  spec:
    ports:
    - name: http
      nodePort: 0
      port: ${{SERVICE_PORT}}
      protocol: TCP
      targetPort: ${{INTERNAL_PORT}}
    selector:
      name: hello
- apiVersion: route.openshift.io/v1
  kind: Route
  metadata:
    labels:
      name: hello
    name: hello-world-route
  spec:
    host: ${APPLICATION_DOMAIN}
    tls:
      termination: edge
    to:
      kind: Service
      name: hello-world-service
- apiVersion: v1
  kind: ReplicationController
  metadata:
    name: hello-world-rc
  spec:
    replicas: 1
    selector:
      name: hello
    template:
      metadata:
        creationTimestamp: null
        labels:
          name: hello
      spec:
        containers:
        - env:
          - name: MESSAGE
            value: ${GREATING_MESSAGE}
          image: docker.io/kalise/nodejs-web-app:latest
          imagePullPolicy: IfNotPresent
          name: hello
          ports:
          - containerPort: ${{INTERNAL_PORT}}
            name: http
            protocol: TCP
          resources:
            limits:
              cpu: 25m
              memory: 128Mi
          securityContext:
            privileged: false
          livenessProbe:
            tcpSocket:
              port: ${{INTERNAL_PORT}}
            timeoutSeconds: 1
            initialDelaySeconds: 30
          terminationMessagePath: /dev/termination-log
        dnsPolicy: ClusterFirst
        nodeSelector:
          region: primary
        restartPolicy: Always
        serviceAccount: ""
parameters:
- description: The exposed hostname that will route to the Hello World service
  name: APPLICATION_DOMAIN
  value: "hello-world.cloud.openshift.b-cloud.it"
  required: true
- description: The internal port used by the pods
  name: INTERNAL_PORT
  value: "8080"
  required: true
- description: The port exposed by the service
  name: SERVICE_PORT
  value: "9000"
  required: true
- description: Greeting message
  name: GREATING_MESSAGE
  value: "Hello OpenShift"
@@ -0,0 +1,20 @@
#!/bin/bash
# Pick the MongoDB shell: prefer mongosh (newer MongoDB images) and fall back to the legacy mongo client.
if which mongosh > /dev/null 2>&1; then
    mongo_init_bin='mongosh'
else
    mongo_init_bin='mongo'
fi
# Authenticate as the root user and create the application user
# with dbOwner rights on the application database and its _stat companion.
"${mongo_init_bin}" <<EOF
use ${MONGO_AUTHSOURCE}
db.auth("${MONGO_INITDB_ROOT_USERNAME}", "${MONGO_INITDB_ROOT_PASSWORD}")
db.createUser({
  user: "${MONGO_USER}",
  pwd: "${MONGO_PASS}",
  roles: [
    { db: "${MONGO_DBNAME}", role: "dbOwner" },
    { db: "${MONGO_DBNAME}_stat", role: "dbOwner" }
  ]
})
EOF
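This script expands the MONGO_* variables that the mongodb Deployment below already defines. The official mongo image only runs such scripts automatically when they sit in /docker-entrypoint-initdb.d at first initialization; a hedged sketch of wiring that up (the file and ConfigMap names are assumptions):

```bash
# Assumption: the script file is saved as init-mongo.sh; the ConfigMap name is illustrative.
oc create configmap mongo-init --from-file=init-mongo.sh -n unifi-server
oc set volume deployment/mongodb --add -t configmap --configmap-name=mongo-init \
  --name=mongo-init --mount-path=/docker-entrypoint-initdb.d -n unifi-server
```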
@@ -0,0 +1,98 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "6"
    image.openshift.io/triggers: '[{"from":{"kind":"ImageStreamTag","name":"jugaralpadel:latest"},"fieldPath":"spec.template.spec.containers[?(@.name==\"jugaralpadel\")].image"}]'
    openshift.io/generated-by: OpenShiftNewApp
  creationTimestamp: "2025-02-21T12:21:29Z"
  generation: 6
  labels:
    app: jugaralpadel
    app.kubernetes.io/component: jugaralpadel
    app.kubernetes.io/instance: jugaralpadel
  name: jugaralpadel
  namespace: jugaralpadel
  resourceVersion: "8355394"
  uid: 1dcbd32d-4e97-40da-9d09-16c9fe5e2eee
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      deployment: jugaralpadel
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      annotations:
        openshift.io/generated-by: OpenShiftNewApp
      creationTimestamp: null
      labels:
        deployment: jugaralpadel
    spec:
      containers:
      - env:
        - name: DEBUG
          value: "False"
        - name: VERSION
          value: "11.0"
        image: image-registry.openshift-image-registry.svc:5000/jugaralpadel/jugaralpadel@sha256:1c86b3845c1869a1c42e83fe6e44a12045d09da28f9b3a675e85f0900e7a7ac3
        imagePullPolicy: IfNotPresent
        name: jugaralpadel
        ports:
        - containerPort: 8080
          protocol: TCP
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        volumeMounts:
        - mountPath: /app/eventos/migrations
          name: jugaralpadel-eventos-migrations
        - mountPath: /app/mediafiles
          name: jugaralpadel-media
        - mountPath: /app/reymotausers/migrations
          name: reymota-reymotausers-migrations
        - mountPath: /app/staticfiles
          name: static-volume
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
      volumes:
      - name: jugaralpadel-eventos-migrations
        persistentVolumeClaim:
          claimName: jugaralpadel-eventos-migrations
      - name: jugaralpadel-media
        persistentVolumeClaim:
          claimName: jugaralpadel-media
      - name: reymota-reymotausers-migrations
        persistentVolumeClaim:
          claimName: reymota-reymotausers-migrations
      - name: static-volume
        persistentVolumeClaim:
          claimName: static-volume
status:
  availableReplicas: 1
  conditions:
  - lastTransitionTime: "2025-02-21T12:21:33Z"
    lastUpdateTime: "2025-02-21T12:21:33Z"
    message: Deployment has minimum availability.
    reason: MinimumReplicasAvailable
    status: "True"
    type: Available
  - lastTransitionTime: "2025-02-21T12:21:29Z"
    lastUpdateTime: "2025-02-21T12:21:33Z"
    message: ReplicaSet "jugaralpadel-94b8766fb" has successfully progressed.
    reason: NewReplicaSetAvailable
    status: "True"
    type: Progressing
  observedGeneration: 6
  readyReplicas: 1
  replicas: 1
  updatedReplicas: 1
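The image.openshift.io/triggers annotation above keeps the container image in sync with the jugaralpadel:latest ImageStreamTag. The same wiring can be created from the CLI, as a sketch:

```bash
# Re-deploy automatically whenever the jugaralpadel:latest ImageStreamTag is updated
oc set triggers deployment/jugaralpadel \
  --from-image=jugaralpadel:latest --containers=jugaralpadel
```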
@@ -0,0 +1 @@
oc create -f unifi-network-application.yaml
@@ -0,0 +1,80 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mongodb
  namespace: unifi-server
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      deployment: mongodb
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      annotations:
        openshift.io/generated-by: OpenShiftNewApp
      creationTimestamp: null
      labels:
        deployment: mongodb
    spec:
      containers:
      - env:
        - name: MONGO_INITDB_ROOT_USERNAME
          value: "root"
        - name: MONGO_INITDB_ROOT_PASSWORD
          value: "Dsa-0213"
        - name: MONGO_USER
          value: "unifi"
        - name: MONGO_PASS
          value: "Rey-1176"
        - name: MONGO_PORT
          value: "27017"
        - name: MONGO_DBNAME
          value: "unifi"
        - name: MONGO_AUTHSOURCE
          value: "admin"
        - name: MONGO_SSL
          value: "True"
        image: mongo:8.0
        imagePullPolicy: IfNotPresent
        name: mongodb
        ports:
        - containerPort: 27017
          protocol: TCP
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        volumeMounts:
        - mountPath: /config
          name: mongodb-volume-1
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
      volumes:
      - name: mongodb-volume-1
        persistentVolumeClaim:
          claimName: mongodb-volume-1
---
apiVersion: v1
kind: Service
metadata:
  name: mongodb
  namespace: unifi-server
spec:
  ports:
  - name: 27017-tcp
    port: 27017
    protocol: TCP
    targetPort: 27017
  selector:
    deployment: mongodb
  sessionAffinity: None
  type: ClusterIP
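Once the pod is running and the init script has created the application user, authentication can be checked from inside the pod (a sketch; the password is the one hard-coded above):

```bash
# Verify that the "unifi" user can authenticate against the admin database
oc rsh deployment/mongodb \
  mongosh --username unifi --password "Rey-1176" --authenticationDatabase admin \
  --eval 'db.adminCommand({ ping: 1 })'
```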
@@ -0,0 +1,110 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: unifi-network-application
    app.kubernetes.io/component: unifi-network-application
    app.kubernetes.io/instance: unifi-network-application
  name: unifi-network-application
  namespace: unifi-server
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      deployment: unifi-network-application
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      annotations:
        openshift.io/generated-by: OpenShiftNewApp
      creationTimestamp: null
      labels:
        deployment: unifi-network-application
    spec:
      containers:
      - env:
        - name: PUID
          value: "1000"
        - name: PGID
          value: "1000"
        - name: TZ
          value: "Etc/UTC"
        - name: MONGO_USER
          value: "unifi"
        - name: MONGO_PASS
          value: "Rey-1176"
        - name: MONGO_HOST
          value: "mongodb"
        - name: MONGO_PORT
          value: "27017"
        - name: MONGO_DBNAME
          value: "unifi"
        - name: MONGO_AUTHSOURCE
          value: "admin"
        - name: MONGO_SSL
          value: "True"
        image: linuxserver/unifi-network-application@sha256:f9ae7f54f094d047fea9d21533c56c204a0b22b4ebc7b5acdc9325c35c1b1f51
        imagePullPolicy: IfNotPresent
        name: unifi-network-application
        ports:
        - containerPort: 8080
          protocol: TCP
        - containerPort: 8443
          protocol: TCP
        - containerPort: 8843
          protocol: TCP
        - containerPort: 8880
          protocol: TCP
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        volumeMounts:
        - mountPath: /config
          name: unifi-network-application-volume-1
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
      volumes:
      - name: unifi-network-application-volume-1
        persistentVolumeClaim:
          claimName: unifi-network-application-volume-1
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: unifi-network-application
    app.kubernetes.io/component: unifi-network-application
    app.kubernetes.io/instance: unifi-network-application
  name: unifi-network-application
  namespace: unifi-server
spec:
  ports:
  - name: 8080-tcp
    port: 8080
    protocol: TCP
    targetPort: 8080
  - name: 8443-tcp
    port: 8443
    protocol: TCP
    targetPort: 8443
  - name: 8843-tcp
    port: 8843
    protocol: TCP
    targetPort: 8843
  - name: 8880-tcp
    port: 8880
    protocol: TCP
    targetPort: 8880
  selector:
    deployment: unifi-network-application
  sessionAffinity: None
  type: ClusterIP
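The Service above is ClusterIP only. To reach the controller UI from outside the cluster, one option is a TLS passthrough route to port 8443 (the UniFi web UI terminates TLS itself); the route name below is arbitrary:

```bash
# Expose the HTTPS UI through an OpenShift route without re-terminating TLS
oc create route passthrough unifi-network-application \
  --service=unifi-network-application --port=8443-tcp -n unifi-server
```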