🚀 Developer Cookbook - FASE 3: Contenedores y Orquestación
Recetas prácticas para construir, desplegar y operar infraestructura moderna
📚 Tabla de Contenidos
Contenedores y Orquestación
Receta 3.1: Docker - Containerización Básica
¿Qué es Docker? Plataforma para empaquetar aplicaciones con sus dependencias en contenedores aislados y portables.
Conceptos clave:
- Imagen: Template inmutable (blueprint)
- Contenedor: Instancia ejecutable de una imagen
- Dockerfile: Receta para construir imágenes
- Registry: Repositorio de imágenes (Docker Hub, ECR, GCR)
Dockerfile - Python API:
# ===== MULTI-STAGE BUILD =====
# Stage 1: Builder (dependencias pesadas)
FROM python:3.11-slim AS builder
WORKDIR /app
# Instalar dependencias de build
RUN apt-get update && apt-get install -y \
gcc \
&& rm -rf /var/lib/apt/lists/*
# Copiar requirements primero (layer caching)
COPY requirements.txt .
# Instalar dependencias en virtualenv
RUN python -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"
RUN pip install --no-cache-dir -r requirements.txt
# Stage 2: Runtime (imagen final mínima)
FROM python:3.11-slim
# Crear usuario no-root (security best practice)
RUN useradd -m -u 1000 appuser
WORKDIR /app
# Copiar virtualenv del builder
COPY --from=builder /opt/venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"
# Copiar código de la aplicación
COPY --chown=appuser:appuser . .
# Cambiar a usuario no-root
USER appuser
# Health check: raise_for_status() hace fallar el check ante respuestas 4xx/5xx
# (requests debe estar incluido en requirements.txt)
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
CMD python -c "import requests; requests.get('http://localhost:8000/health', timeout=2).raise_for_status()"
# Exponer puerto
EXPOSE 8000
# Comando de inicio
CMD ["gunicorn", "--bind", "0.0.0.0:8000", "--workers", "4", "app:app"]
Dockerfile - Node.js:
# ===== MULTI-STAGE BUILD =====
FROM node:18-alpine AS builder
WORKDIR /app
# Copiar package files
COPY package*.json ./
# Instalar TODAS las dependencias (las devDependencies son necesarias para el build)
RUN npm ci
# Copiar código fuente
COPY . .
# Build (si usas TypeScript, webpack, etc.)
RUN npm run build
# Eliminar devDependencies antes de copiar node_modules al runtime
RUN npm prune --omit=dev
# ===== RUNTIME =====
FROM node:18-alpine
# Instalar dumb-init (mejor manejo de señales)
RUN apk add --no-cache dumb-init
# Usuario no-root
RUN addgroup -g 1000 nodeuser && \
adduser -D -u 1000 -G nodeuser nodeuser
WORKDIR /app
# Copiar node_modules y build
COPY --from=builder --chown=nodeuser:nodeuser /app/node_modules ./node_modules
COPY --from=builder --chown=nodeuser:nodeuser /app/dist ./dist
COPY --chown=nodeuser:nodeuser package*.json ./
USER nodeuser
EXPOSE 3000
# Usar dumb-init como PID 1
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["node", "dist/index.js"]
Comandos Docker esenciales:
# ===== BUILD =====
# Build básico
docker build -t myapp:1.0 .
# Build con argumentos
docker build --build-arg NODE_ENV=production -t myapp:prod .
# Build sin cache
docker build --no-cache -t myapp:latest .
# Multi-platform build (requiere --push para publicar al registry; --load solo admite una plataforma)
docker buildx build --platform linux/amd64,linux/arm64 -t myapp:multiarch --push .
# ===== RUN =====
# Run básico
docker run -d -p 8000:8000 --name myapp myapp:1.0
# Run con variables de entorno
docker run -d \
-e DATABASE_URL=postgresql://localhost/mydb \
-e API_KEY=secret123 \
--name myapp \
myapp:1.0
# Run con volumes (persistencia)
docker run -d \
-v $(pwd)/data:/app/data \
-v myapp-logs:/var/log \
--name myapp \
myapp:1.0
# Run con limits (recursos)
docker run -d \
--memory="512m" \
--cpus="1.5" \
--name myapp \
myapp:1.0
# Run interactivo (debugging)
docker run -it --rm myapp:1.0 /bin/bash
# ===== INSPECT =====
# Ver logs
docker logs -f myapp
# Ver procesos
docker top myapp
# Ver stats en tiempo real
docker stats myapp
# Inspeccionar contenedor
docker inspect myapp
# Ejecutar comando en contenedor corriendo
docker exec -it myapp /bin/bash
docker exec myapp ls -la /app
# ===== CLEANUP =====
# Parar contenedor
docker stop myapp
# Remover contenedor
docker rm myapp
# Remover imagen
docker rmi myapp:1.0
# Limpiar todo (contenedores stopped, imágenes sin usar)
docker system prune -a
# Ver uso de disco
docker system df
Docker Compose - Multi-container App:
# docker-compose.yml
# Nota: el campo "version" es obsoleto en la Compose Specification (Docker Compose V2 lo ignora)
version: '3.8'
services:
# API Backend
api:
build:
context: ./api
dockerfile: Dockerfile
args:
- NODE_ENV=production
container_name: myapp-api
ports:
- "3000:3000"
environment:
- DATABASE_URL=postgresql://postgres:password@db:5432/myapp
- REDIS_URL=redis://redis:6379
- NODE_ENV=production
depends_on:
db:
condition: service_healthy
redis:
condition: service_started
volumes:
- ./api/logs:/app/logs
networks:
- app-network
restart: unless-stopped
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:3000/health"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
# PostgreSQL Database
db:
image: postgres:15-alpine
container_name: myapp-db
environment:
- POSTGRES_DB=myapp
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=password
volumes:
- postgres-data:/var/lib/postgresql/data
- ./init.sql:/docker-entrypoint-initdb.d/init.sql
networks:
- app-network
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "pg_isready -U postgres"]
interval: 10s
timeout: 5s
retries: 5
# Redis Cache
redis:
image: redis:7-alpine
container_name: myapp-redis
command: redis-server --appendonly yes
volumes:
- redis-data:/data
networks:
- app-network
restart: unless-stopped
# Nginx Reverse Proxy
nginx:
image: nginx:alpine
container_name: myapp-nginx
ports:
- "80:80"
- "443:443"
volumes:
- ./nginx.conf:/etc/nginx/nginx.conf:ro
- ./ssl:/etc/nginx/ssl:ro
depends_on:
- api
networks:
- app-network
restart: unless-stopped
# Worker (background jobs)
worker:
build:
context: ./api
dockerfile: Dockerfile
container_name: myapp-worker
command: ["npm", "run", "worker"]
environment:
- REDIS_URL=redis://redis:6379
- DATABASE_URL=postgresql://postgres:password@db:5432/myapp
depends_on:
- redis
- db
networks:
- app-network
restart: unless-stopped
volumes:
postgres-data:
driver: local
redis-data:
driver: local
networks:
app-network:
driver: bridge
Comandos Docker Compose:
# Nota: en Docker Compose V2 el comando es "docker compose" (sin guion);
# "docker-compose" corresponde al binario standalone de la versión V1
# Iniciar todos los servicios
docker-compose up -d
# Iniciar servicios específicos
docker-compose up -d api db
# Ver logs
docker-compose logs -f api
# Escalar servicio
docker-compose up -d --scale worker=3
# Rebuild y reiniciar
docker-compose up -d --build
# Parar servicios
docker-compose stop
# Parar y remover contenedores
docker-compose down
# Parar y remover contenedores + volumes
docker-compose down -v
# Ver servicios corriendo
docker-compose ps
# Ejecutar comando en servicio
docker-compose exec api npm test
# Ver variables de entorno
docker-compose config
Optimización de Imágenes:
# ❌ MAL: Imagen pesada (1.2 GB)
FROM ubuntu:latest
RUN apt-get update && apt-get install -y python3 python3-pip
COPY . /app
WORKDIR /app
RUN pip3 install -r requirements.txt
CMD ["python3", "app.py"]
# ✅ BIEN: Imagen optimizada (150 MB)
FROM python:3.11-slim
WORKDIR /app
# Instalar dependencias primero (caching)
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
# Copiar código después
COPY . .
# Usuario no-root
RUN useradd -m appuser
USER appuser
CMD ["python", "app.py"]
# ✅ MEJOR: Multi-stage build (80 MB)
# Importante: usar la MISMA base en ambos stages; mezclar slim (glibc)
# con alpine (musl) rompe los paquetes Python con extensiones compiladas
FROM python:3.11-slim AS builder
WORKDIR /app
COPY requirements.txt .
RUN pip install --user --no-cache-dir -r requirements.txt
FROM python:3.11-slim
WORKDIR /app
RUN useradd -m appuser
# Copiar los paquetes al home del usuario no-root (si quedaran en /root/.local,
# appuser no tendría permisos de lectura)
COPY --from=builder --chown=appuser:appuser /root/.local /home/appuser/.local
COPY --chown=appuser:appuser . .
ENV PATH=/home/appuser/.local/bin:$PATH
USER appuser
CMD ["python", "app.py"]
Best Practices:
# 1. USAR .dockerignore
# .dockerignore
node_modules
npm-debug.log
.git
.env
*.md
.pytest_cache
__pycache__
# 2. LAYER CACHING - Copiar dependencias primero
COPY package*.json ./
RUN npm ci
# El código cambia frecuentemente: va al final
# (Dockerfile NO admite comentarios inline tras una instrucción)
COPY . .
# 3. MINIMIZAR LAYERS - Combinar comandos
RUN apt-get update && \
apt-get install -y curl wget && \
rm -rf /var/lib/apt/lists/*
# 4. USAR IMÁGENES ALPINE cuando sea posible (~50 MB vs ~300 MB)
FROM node:18-alpine
# 5. NO CORRER COMO ROOT
RUN adduser -D -u 1000 appuser
USER appuser
# 6. ESPECIFICAR VERSIONES
# ✅ versión fija, builds reproducibles:
FROM node:18.16.0-alpine
# ❌ evita latest, cambia entre builds:
FROM node:latest
# 7. HEALTH CHECKS
HEALTHCHECK --interval=30s CMD curl -f http://localhost/ || exit 1
# 8. METADATA
LABEL maintainer="dev@example.com"
LABEL version="1.0"
LABEL description="My application"
Receta 3.2: Kubernetes - Orquestación de Contenedores
¿Qué es Kubernetes? Sistema de orquestación para automatizar deployment, scaling y management de aplicaciones containerizadas.
Arquitectura básica:
- Cluster: Conjunto de nodos (servidores)
- Node: Máquina física/virtual que ejecuta contenedores
- Pod: Unidad más pequeña, contiene 1+ contenedores
- Service: Abstracción para exponer Pods
- Deployment: Define estado deseado de los Pods
Deployment YAML:
# deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: myapp-api
namespace: production
labels:
app: myapp
tier: backend
spec:
# Número de replicas
replicas: 3
# Selector para identificar Pods
selector:
matchLabels:
app: myapp
tier: backend
# Estrategia de deployment
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 1 # Pods extra permitidos por encima de replicas durante el update
maxUnavailable: 0 # Máximo de Pods que pueden quedar NO disponibles (0 = cero downtime)
# Template del Pod
template:
metadata:
labels:
app: myapp
tier: backend
version: v1.2.0
spec:
# Affinity: preferir nodos diferentes (HA)
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app
operator: In
values:
- myapp
topologyKey: kubernetes.io/hostname
# Init containers (ejecutan antes del main)
initContainers:
- name: wait-for-db
image: busybox:1.28
command: ['sh', '-c', 'until nc -z postgres 5432; do echo waiting for db; sleep 2; done']
# Contenedores principales
containers:
- name: api
image: myregistry.io/myapp:1.2.0
imagePullPolicy: IfNotPresent
# Puertos
ports:
- containerPort: 3000
name: http
protocol: TCP
# Variables de entorno
env:
- name: NODE_ENV
value: "production"
- name: DATABASE_URL
valueFrom:
secretKeyRef:
name: myapp-secrets
key: database-url
- name: REDIS_HOST
valueFrom:
configMapKeyRef:
name: myapp-config
key: redis-host
# Resource limits (CRITICAL)
resources:
requests:
memory: "256Mi"
cpu: "250m"
limits:
memory: "512Mi"
cpu: "500m"
# Probes (health checks)
livenessProbe:
httpGet:
path: /health
port: 3000
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
readinessProbe:
httpGet:
path: /ready
port: 3000
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 3
failureThreshold: 3
# Volumes
volumeMounts:
- name: config
mountPath: /app/config
readOnly: true
- name: logs
mountPath: /app/logs
# Volumes
volumes:
- name: config
configMap:
name: myapp-config
- name: logs
emptyDir: {}
# Image pull secrets (registries privados)
imagePullSecrets:
- name: myregistry-secret
Service (LoadBalancer):
# service.yaml
apiVersion: v1
kind: Service
metadata:
name: myapp-api-service
namespace: production
labels:
app: myapp
spec:
type: LoadBalancer # ClusterIP, NodePort, LoadBalancer
selector:
app: myapp
tier: backend
ports:
- port: 80 # Puerto del service
targetPort: 3000 # Puerto del container
protocol: TCP
name: http
sessionAffinity: ClientIP # Sticky sessions
ConfigMap (configuración):
# configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: myapp-config
namespace: production
data:
# Valores simples
redis-host: "redis-service.production.svc.cluster.local"
log-level: "info"
# Archivos completos
app-config.json: |
{
"api": {
"rateLimit": 100,
"timeout": 30
},
"features": {
"newUI": true
}
}
Secret (credenciales):
# secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: myapp-secrets
namespace: production
type: Opaque
data:
# Base64 encoded
database-url: cG9zdGdyZXNxbDovL3VzZXI6cGFzc0BkYi9teWRi
api-key: c2VjcmV0MTIz
stringData:
# Plain text (auto-encoded)
smtp-password: "mypassword123"
Horizontal Pod Autoscaler (HPA):
# hpa.yaml
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: myapp-api-hpa
namespace: production
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: myapp-api
minReplicas: 3
maxReplicas: 10
metrics:
# Escalar basado en CPU
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 70
# Escalar basado en memoria
- type: Resource
resource:
name: memory
target:
type: Utilization
averageUtilization: 80
# Escalar basado en métrica custom
- type: Pods
pods:
metric:
name: http_requests_per_second
target:
type: AverageValue
averageValue: "1000"
behavior:
scaleDown:
stabilizationWindowSeconds: 300 # Esperar 5min antes de scale down
policies:
- type: Percent
value: 50
periodSeconds: 60
scaleUp:
stabilizationWindowSeconds: 0
policies:
- type: Percent
value: 100
periodSeconds: 30
Ingress (routing HTTP/HTTPS):
# ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: myapp-ingress
namespace: production
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
nginx.ingress.kubernetes.io/rate-limit: "100"
nginx.ingress.kubernetes.io/ssl-redirect: "true"
spec:
# ingressClassName reemplaza a la annotation kubernetes.io/ingress.class (deprecada desde K8s 1.18)
ingressClassName: nginx
tls:
- hosts:
- api.myapp.com
secretName: myapp-tls
rules:
- host: api.myapp.com
http:
paths:
- path: /api
pathType: Prefix
backend:
service:
name: myapp-api-service
port:
number: 80
- path: /
pathType: Prefix
backend:
service:
name: myapp-frontend-service
port:
number: 80
PersistentVolumeClaim (almacenamiento):
# pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: postgres-pvc
namespace: production
spec:
accessModes:
- ReadWriteOnce
storageClassName: fast-ssd
resources:
requests:
storage: 100Gi
StatefulSet (bases de datos):
# statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: postgres
namespace: production
spec:
serviceName: postgres-headless
replicas: 3
selector:
matchLabels:
app: postgres
template:
metadata:
labels:
app: postgres
spec:
containers:
- name: postgres
image: postgres:15
ports:
- containerPort: 5432
name: postgres
env:
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: postgres-secret
key: password
- name: PGDATA
value: /var/lib/postgresql/data/pgdata
volumeMounts:
- name: postgres-storage
mountPath: /var/lib/postgresql/data
volumeClaimTemplates:
- metadata:
name: postgres-storage
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: fast-ssd
resources:
requests:
storage: 50Gi
Comandos kubectl esenciales:
# ===== APPLY / CREATE =====
# Aplicar configuración
kubectl apply -f deployment.yaml
# Aplicar directorio completo
kubectl apply -f ./k8s/
# Crear namespace
kubectl create namespace production
# ===== GET / DESCRIBE =====
# Listar recursos
kubectl get pods
kubectl get deployments
kubectl get services
kubectl get all
# Con namespace específico
kubectl get pods -n production
# Con más detalles
kubectl get pods -o wide
# En todos los namespaces
kubectl get pods --all-namespaces
# Describir recurso
kubectl describe pod myapp-api-abc123
kubectl describe deployment myapp-api
# ===== LOGS =====
# Ver logs
kubectl logs myapp-api-abc123
# Follow logs
kubectl logs -f myapp-api-abc123
# Logs de contenedor específico
kubectl logs myapp-api-abc123 -c sidecar
# Logs previos (crashed container)
kubectl logs myapp-api-abc123 --previous
# ===== EXEC =====
# Ejecutar comando
kubectl exec myapp-api-abc123 -- ls -la
# Shell interactivo
kubectl exec -it myapp-api-abc123 -- /bin/bash
# ===== SCALE =====
# Escalar deployment
kubectl scale deployment myapp-api --replicas=5
# Autoscale
kubectl autoscale deployment myapp-api --min=3 --max=10 --cpu-percent=80
# ===== UPDATE =====
# Actualizar imagen
kubectl set image deployment/myapp-api api=myapp:1.3.0
# Rollout status
kubectl rollout status deployment/myapp-api
# Rollout history
kubectl rollout history deployment/myapp-api
# Rollback
kubectl rollout undo deployment/myapp-api
kubectl rollout undo deployment/myapp-api --to-revision=2
# ===== DELETE =====
# Eliminar pod
kubectl delete pod myapp-api-abc123
# Eliminar deployment
kubectl delete deployment myapp-api
# Eliminar por archivo
kubectl delete -f deployment.yaml
# Force delete (stuck resources)
kubectl delete pod myapp-api-abc123 --force --grace-period=0
# ===== DEBUG =====
# Port forward (debug local)
kubectl port-forward pod/myapp-api-abc123 8080:3000
# Ver eventos
kubectl get events --sort-by=.metadata.creationTimestamp
# Top pods (resource usage)
kubectl top pods
kubectl top nodes
# ===== CONTEXTOS =====
# Ver contextos (clusters)
kubectl config get-contexts
# Cambiar contexto
kubectl config use-context production-cluster
# Set namespace default
kubectl config set-context --current --namespace=production
Helm - Package Manager para K8s:
# Chart.yaml
apiVersion: v2
name: myapp
description: My application Helm chart
version: 1.0.0
appVersion: 1.2.0
# values.yaml
replicaCount: 3
image:
repository: myregistry.io/myapp
tag: "1.2.0"
pullPolicy: IfNotPresent
service:
type: LoadBalancer
port: 80
resources:
limits:
cpu: 500m
memory: 512Mi
requests:
cpu: 250m
memory: 256Mi
autoscaling:
enabled: true
minReplicas: 3
maxReplicas: 10
targetCPUUtilizationPercentage: 70
ingress:
enabled: true
host: api.myapp.com
tls:
enabled: true
# templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "myapp.fullname" . }}
labels:
{{- include "myapp.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "myapp.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "myapp.selectorLabels" . | nindent 8 }}
spec:
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- containerPort: 3000
resources:
{{- toYaml .Values.resources | nindent 10 }}
Comandos Helm:
# Instalar chart
helm install myapp ./myapp-chart
# Install con custom values
helm install myapp ./myapp-chart -f values-prod.yaml
# Upgrade
helm upgrade myapp ./myapp-chart
# Rollback
helm rollback myapp 1
# Ver releases
helm list
# Uninstall
helm uninstall myapp
# Ver chart renderizado
helm template myapp ./myapp-chart
# Package chart
helm package myapp-chart
Versión: 1.0 Fecha: 2024 Autor: Roadmap del Desarrollador del Futuro Licencia: Uso educativo