🚀 Developer Cookbook - FASE 3: Cloud Computing

Recetas prácticas para construir, desplegar y operar infraestructura moderna


📚 Tabla de Contenidos

  1. Receta 3.3: AWS - Servicios Principales
  2. Receta 3.4: Serverless - AWS Lambda

Cloud Computing

Receta 3.3: AWS - Servicios Principales

Arquitectura típica en AWS:

┌─────────────────────────────────────────────────────────┐
│                    Route 53 (DNS)                        │
└────────────────────┬────────────────────────────────────┘

┌────────────────────▼────────────────────────────────────┐
│              CloudFront (CDN)                            │
└────────────────────┬────────────────────────────────────┘

┌────────────────────▼────────────────────────────────────┐
│         Application Load Balancer (ALB)                  │
└─────┬──────────────────────────────┬────────────────────┘
      │                              │
┌─────▼──────┐              ┌────────▼────────┐
│  ECS/EKS   │              │   ECS/EKS       │
│  (API)     │              │   (API)         │
│ AZ 1       │              │   AZ 2          │
└─────┬──────┘              └────────┬────────┘
      │                              │
      └──────────┬───────────────────┘

        ┌────────▼────────┐
        │   RDS (DB)      │
        │   Multi-AZ      │
        └────────┬────────┘

        ┌────────▼────────┐
        │  ElastiCache    │
        │   (Redis)       │
        └─────────────────┘

Terraform - Infraestructura en AWS:

# main.tf

# Provider configuration
# Core Terraform settings: pin tool/provider versions, keep state remote.
terraform {
  required_version = ">= 1.0"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.0"
    }
  }

  # Remote state in S3; the DynamoDB table provides state locking so
  # concurrent applies cannot corrupt the state file.
  backend "s3" {
    bucket         = "myapp-terraform-state"
    key            = "production/terraform.tfstate"
    region         = "us-east-1"
    encrypt        = true
    dynamodb_table = "terraform-lock"
  }
}

provider "aws" {
  region = var.aws_region

  # default_tags are applied to every resource this provider creates,
  # so per-resource tags only need resource-specific entries.
  default_tags {
    tags = {
      Environment = var.environment
      Project     = "myapp"
      ManagedBy   = "Terraform"
    }
  }
}

# Variables
# AWS region to deploy into.
variable "aws_region" {
  type        = string
  description = "AWS region for all resources"
  default     = "us-east-1"
}

# Environment name used in tags and resource names.
variable "environment" {
  type        = string
  description = "Deployment environment (e.g. production, staging)"
  default     = "production"
}

# VPC
# VPC with a /16 block (~65k addresses). DNS support/hostnames are
# required so RDS endpoints and internal names resolve inside the VPC.
resource "aws_vpc" "main" {
  cidr_block           = "10.0.0.0/16"
  enable_dns_hostnames = true
  enable_dns_support   = true

  tags = {
    Name = "myapp-vpc"
  }
}

# Subnets (Multi-AZ)
# Availability zones of the current region. This data source was
# referenced below but never declared, so `terraform plan` would fail.
data "aws_availability_zones" "available" {
  state = "available"
}

# Public subnets (10.0.0.0/24, 10.0.1.0/24), one per AZ; they host the
# ALB and the NAT gateways.
resource "aws_subnet" "public" {
  count             = 2
  vpc_id            = aws_vpc.main.id
  cidr_block        = "10.0.${count.index}.0/24"
  availability_zone = data.aws_availability_zones.available.names[count.index]

  # Resources launched here get public IPs (internet-facing tier)
  map_public_ip_on_launch = true

  tags = {
    Name = "myapp-public-${count.index + 1}"
  }
}

# Private subnets (10.0.10.0/24, 10.0.11.0/24) host the ECS tasks;
# they reach the internet only through the NAT gateways.
resource "aws_subnet" "private" {
  count             = 2
  vpc_id            = aws_vpc.main.id
  cidr_block        = "10.0.${count.index + 10}.0/24"
  availability_zone = data.aws_availability_zones.available.names[count.index]

  tags = {
    Name = "myapp-private-${count.index + 1}"
  }
}

# Internet Gateway
# Internet Gateway: gives the public subnets a route to the internet.
resource "aws_internet_gateway" "main" {
  vpc_id = aws_vpc.main.id

  tags = {
    Name = "myapp-igw"
  }
}

# One Elastic IP per NAT gateway (one per AZ).
resource "aws_eip" "nat" {
  count  = 2
  domain = "vpc"
}

# NAT gateways give the private subnets outbound internet access.
# NOTE(review): route tables / associations are not defined in this
# file, so traffic will not actually flow until they are added.
resource "aws_nat_gateway" "main" {
  count         = 2
  allocation_id = aws_eip.nat[count.index].id
  subnet_id     = aws_subnet.public[count.index].id

  tags = {
    Name = "myapp-nat-${count.index + 1}"
  }

  # Recommended by the AWS provider docs: the IGW must exist before a
  # NAT gateway can be created in a public subnet.
  depends_on = [aws_internet_gateway.main]
}

# Security Group - ALB
# ALB security group: public HTTP/HTTPS in, unrestricted egress so the
# load balancer can reach the ECS tasks on their container port.
resource "aws_security_group" "alb" {
  name        = "myapp-alb-sg"
  description = "Security group for ALB"
  vpc_id      = aws_vpc.main.id

  # Plain HTTP (normally redirected to HTTPS at the listener level)
  ingress {
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  ingress {
    from_port   = 443
    to_port     = 443
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  # Allow all outbound traffic
  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
}

# ECS task security group: only traffic coming from the ALB's security
# group may reach the container port; everything outbound is allowed
# (image pulls, RDS, Redis and AWS APIs via the NAT gateways).
resource "aws_security_group" "ecs" {
  name        = "myapp-ecs-sg"
  description = "Security group for ECS tasks"
  vpc_id      = aws_vpc.main.id

  ingress {
    from_port       = 3000
    to_port         = 3000
    protocol        = "tcp"
    security_groups = [aws_security_group.alb.id]
  }

  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
}

# Application Load Balancer
# Internet-facing Application Load Balancer spanning both public subnets.
resource "aws_lb" "main" {
  name               = "myapp-alb"
  internal           = false
  load_balancer_type = "application"
  security_groups    = [aws_security_group.alb.id]
  subnets            = aws_subnet.public[*].id

  # Protects the production ALB from accidental `terraform destroy`
  enable_deletion_protection = true
  enable_http2              = true

  access_logs {
    # NOTE(review): aws_s3_bucket.alb_logs is not declared in this
    # file -- confirm it is defined elsewhere.
    bucket  = aws_s3_bucket.alb_logs.id
    enabled = true
  }
}

# Target group for the API containers; target_type = "ip" is required
# for Fargate tasks in awsvpc network mode.
resource "aws_lb_target_group" "api" {
  name        = "myapp-api-tg"
  port        = 3000
  protocol    = "HTTP"
  vpc_id      = aws_vpc.main.id
  target_type = "ip"

  health_check {
    enabled             = true
    path                = "/health"
    healthy_threshold   = 2
    unhealthy_threshold = 3
    timeout             = 5
    interval            = 30
    matcher             = "200"
  }

  # Drain connections for 30s (default is 300s) for faster deploys
  deregistration_delay = 30
}

# HTTPS listener terminating TLS at the ALB and forwarding to the API.
resource "aws_lb_listener" "https" {
  load_balancer_arn = aws_lb.main.arn
  port              = "443"
  protocol          = "HTTPS"
  # TLS 1.3-capable policy (with TLS 1.2 fallback); replaces the 2017
  # TLS 1.2 policy originally used here, which AWS no longer recommends.
  ssl_policy        = "ELBSecurityPolicy-TLS13-1-2-2021-06"
  # NOTE(review): aws_acm_certificate.main is not declared in this
  # file -- confirm it is defined elsewhere.
  certificate_arn   = aws_acm_certificate.main.arn

  default_action {
    type             = "forward"
    target_group_arn = aws_lb_target_group.api.arn
  }
}

# ECS Cluster
# ECS cluster with Container Insights enabled, which publishes per-task
# CPU/memory metrics to CloudWatch.
resource "aws_ecs_cluster" "main" {
  name = "myapp-cluster"

  setting {
    name  = "containerInsights"
    value = "enabled"
  }
}

# ECS Task Definition
# Fargate task definition for the API container.
resource "aws_ecs_task_definition" "api" {
  family                   = "myapp-api"
  network_mode             = "awsvpc"
  requires_compatibilities = ["FARGATE"]
  # 0.5 vCPU / 1 GB -- a valid Fargate CPU/memory combination
  cpu                      = "512"
  memory                   = "1024"
  # Execution role: pulls the image and writes logs.
  # Task role: permissions used by the application code itself.
  # NOTE(review): neither IAM role nor the ECR repository below is
  # declared in this file -- confirm they are defined elsewhere.
  execution_role_arn       = aws_iam_role.ecs_execution.arn
  task_role_arn            = aws_iam_role.ecs_task.arn

  container_definitions = jsonencode([
    {
      name  = "api"
      # NOTE(review): the mutable ":latest" tag makes deployments
      # non-reproducible -- consider pinning an immutable image tag.
      image = "${aws_ecr_repository.api.repository_url}:latest"

      portMappings = [
        {
          containerPort = 3000
          protocol      = "tcp"
        }
      ]

      environment = [
        {
          name  = "NODE_ENV"
          value = "production"
        },
        {
          name  = "DATABASE_URL"
          value = "postgresql://${aws_db_instance.main.endpoint}/myapp"
        }
      ]

      # Injected from Secrets Manager at task start -- never stored in
      # the task definition in plain text.
      secrets = [
        {
          name      = "DB_PASSWORD"
          valueFrom = "${aws_secretsmanager_secret.db_password.arn}"
        }
      ]

      logConfiguration = {
        logDriver = "awslogs"
        options = {
          "awslogs-group"         = "/ecs/myapp-api"
          "awslogs-region"        = var.aws_region
          "awslogs-stream-prefix" = "ecs"
        }
      }

      # Container-level health check (in addition to the ALB's);
      # startPeriod gives the app 60s to boot before failures count.
      healthCheck = {
        command     = ["CMD-SHELL", "curl -f http://localhost:3000/health || exit 1"]
        interval    = 30
        timeout     = 5
        retries     = 3
        startPeriod = 60
      }
    }
  ])
}

# ECS Service
# ECS service: keeps 3 API tasks running in the private subnets and
# registered with the ALB target group.
resource "aws_ecs_service" "api" {
  name            = "myapp-api"
  cluster         = aws_ecs_cluster.main.id
  task_definition = aws_ecs_task_definition.api.arn
  desired_count   = 3
  launch_type     = "FARGATE"

  network_configuration {
    subnets          = aws_subnet.private[*].id
    security_groups  = [aws_security_group.ecs.id]
    # Tasks stay private; outbound traffic goes through the NAT gateways
    assign_public_ip = false
  }

  load_balancer {
    target_group_arn = aws_lb_target_group.api.arn
    container_name   = "api"
    container_port   = 3000
  }

  # Rolling deploy: up to 2x tasks during a deploy, never below the
  # desired count. The original used a `deployment_configuration`
  # block, which is the CloudFormation name and is invalid in the
  # Terraform AWS provider -- these are top-level arguments here.
  deployment_maximum_percent         = 200
  deployment_minimum_healthy_percent = 100

  # Targets can only register once the HTTPS listener exists
  depends_on = [aws_lb_listener.https]
}

# Auto Scaling
# Auto Scaling target: the service floats between 3 and 10 tasks.
resource "aws_appautoscaling_target" "ecs" {
  max_capacity       = 10
  min_capacity       = 3
  resource_id        = "service/${aws_ecs_cluster.main.name}/${aws_ecs_service.api.name}"
  scalable_dimension = "ecs:service:DesiredCount"
  service_namespace  = "ecs"
}

# Target-tracking policy: keep average CPU near 70%. Scale out quickly
# (60s cooldown) but scale in conservatively (300s) to avoid flapping.
resource "aws_appautoscaling_policy" "ecs_cpu" {
  name               = "myapp-api-cpu-scaling"
  policy_type        = "TargetTrackingScaling"
  resource_id        = aws_appautoscaling_target.ecs.resource_id
  scalable_dimension = aws_appautoscaling_target.ecs.scalable_dimension
  service_namespace  = aws_appautoscaling_target.ecs.service_namespace

  target_tracking_scaling_policy_configuration {
    target_value       = 70.0
    scale_in_cooldown  = 300
    scale_out_cooldown = 60

    predefined_metric_specification {
      predefined_metric_type = "ECSServiceAverageCPUUtilization"
    }
  }
}

# RDS PostgreSQL
# Managed PostgreSQL with Multi-AZ standby, encryption at rest and
# 7 days of automated backups.
resource "aws_db_instance" "main" {
  identifier             = "myapp-db"
  engine                 = "postgres"
  engine_version         = "15.3"
  instance_class         = "db.t3.medium"
  allocated_storage      = 100
  storage_type           = "gp3"
  storage_encrypted      = true

  db_name  = "myapp"
  username = "postgres"
  # NOTE(review): random_password.db_password, aws_db_subnet_group.main
  # and aws_security_group.rds are not declared in this file -- confirm
  # they are defined elsewhere.
  password = random_password.db_password.result

  # Synchronous standby in a second AZ for automatic failover
  multi_az               = true
  db_subnet_group_name   = aws_db_subnet_group.main.name
  vpc_security_group_ids = [aws_security_group.rds.id]

  # Backup and maintenance windows are offset so they never overlap
  backup_retention_period = 7
  backup_window          = "03:00-04:00"
  maintenance_window     = "sun:04:00-sun:05:00"

  enabled_cloudwatch_logs_exports = ["postgresql", "upgrade"]

  # A final snapshot is taken if the instance is ever destroyed
  deletion_protection = true
  skip_final_snapshot = false
  final_snapshot_identifier = "myapp-db-final-snapshot"
}

# ElastiCache Redis
# Single-node Redis for caching. NOTE(review): num_cache_nodes = 1
# means no replica or automatic failover -- use an
# aws_elasticache_replication_group if HA is required.
resource "aws_elasticache_cluster" "redis" {
  cluster_id           = "myapp-redis"
  engine               = "redis"
  engine_version       = "7.0"
  node_type            = "cache.t3.medium"
  num_cache_nodes      = 1
  parameter_group_name = "default.redis7"
  # NOTE(review): aws_elasticache_subnet_group.main and
  # aws_security_group.redis are not declared in this file.
  subnet_group_name    = aws_elasticache_subnet_group.main.name
  security_group_ids   = [aws_security_group.redis.id]

  snapshot_retention_limit = 5
  snapshot_window         = "03:00-05:00"
}

# S3 Bucket
# Static assets bucket; versioning and encryption are configured via
# the dedicated per-feature resources required by AWS provider v4+.
resource "aws_s3_bucket" "assets" {
  bucket = "myapp-assets-${var.environment}"
}

resource "aws_s3_bucket_versioning" "assets" {
  bucket = aws_s3_bucket.assets.id

  versioning_configuration {
    status = "Enabled"
  }
}

# Server-side encryption with S3-managed keys (SSE-S3)
resource "aws_s3_bucket_server_side_encryption_configuration" "assets" {
  bucket = aws_s3_bucket.assets.id

  rule {
    apply_server_side_encryption_by_default {
      sse_algorithm = "AES256"
    }
  }
}

# CloudFront Distribution
# CDN in front of the assets bucket. PriceClass_100 limits edge
# locations to NA/Europe (the cheapest tier).
resource "aws_cloudfront_distribution" "main" {
  enabled             = true
  is_ipv6_enabled     = true
  price_class         = "PriceClass_100"

  origin {
    domain_name = aws_s3_bucket.assets.bucket_regional_domain_name
    origin_id   = "S3-${aws_s3_bucket.assets.id}"

    # The OAI lets CloudFront read the private bucket.
    # NOTE(review): aws_cloudfront_origin_access_identity.main is not
    # declared in this file -- confirm it is defined elsewhere.
    s3_origin_config {
      origin_access_identity = aws_cloudfront_origin_access_identity.main.cloudfront_access_identity_path
    }
  }

  default_cache_behavior {
    allowed_methods        = ["GET", "HEAD", "OPTIONS"]
    cached_methods         = ["GET", "HEAD"]
    target_origin_id       = "S3-${aws_s3_bucket.assets.id}"
    viewer_protocol_policy = "redirect-to-https"
    compress               = true

    # NOTE(review): forwarded_values is deprecated in favor of cache
    # policies (cache_policy_id) -- still works, consider migrating.
    forwarded_values {
      query_string = false
      cookies {
        forward = "none"
      }
    }

    min_ttl     = 0
    default_ttl = 3600
    max_ttl     = 86400
  }

  restrictions {
    geo_restriction {
      restriction_type = "none"
    }
  }

  # Default *.cloudfront.net certificate; a custom domain would need
  # an ACM certificate in us-east-1 here instead.
  viewer_certificate {
    cloudfront_default_certificate = true
  }
}

# Outputs
# DNS name to point clients (or a Route 53 alias record) at the ALB.
output "alb_dns_name" {
  value = aws_lb.main.dns_name
}

# Marked sensitive so the endpoint is not printed in CLI output.
output "rds_endpoint" {
  value     = aws_db_instance.main.endpoint
  sensitive = true
}

Receta 3.4: Serverless - AWS Lambda

¿Qué es Serverless? Ejecutar código sin gestionar servidores. Pagas solo por tiempo de ejecución.

Lambda Function (Python):

# lambda_function.py
import json
import boto3
from datetime import datetime

# AWS clients/resources are created once at module import time so they
# are reused across warm Lambda invocations instead of being rebuilt
# on every call (reduces per-invocation latency).
s3 = boto3.client('s3')
dynamodb = boto3.resource('dynamodb')
sns = boto3.client('sns')

def lambda_handler(event, context):
    """
    Main Lambda entry point.

    Args:
        event: The triggering event -- either an API Gateway proxy
            event (payload under 'body') or a direct invocation dict.
        context: Lambda execution context (request id, remaining time, ...).

    Returns:
        dict with 'statusCode', 'headers' and a JSON-encoded 'body'.
    """

    print(f"Received event: {json.dumps(event)}")

    try:
        # API Gateway proxy events carry the payload as a JSON string
        # under 'body'; it can be None when the client sent no body.
        if 'body' in event:
            if not event['body']:
                # Previously this fell through to json.loads(None),
                # raising TypeError and surfacing as a 500; an empty
                # body is a client error, so report 400 instead.
                raise ValueError("Request body is empty")
            body = json.loads(event['body'])
        else:
            body = event

        result = process_request(body)

        return {
            'statusCode': 200,
            'headers': {
                'Content-Type': 'application/json',
                'Access-Control-Allow-Origin': '*'
            },
            'body': json.dumps({
                'message': 'Success',
                'data': result,
                'timestamp': datetime.utcnow().isoformat()
            })
        }

    except ValueError as e:
        # json.JSONDecodeError subclasses ValueError, so malformed
        # bodies also land here as a 400.
        return error_response(400, str(e))
    except Exception as e:
        print(f"Error: {str(e)}")
        return error_response(500, "Internal server error")

def process_request(data):
    """Business logic: persist, archive and broadcast one payload."""
    record_id = data['id']

    # Persist the raw payload in DynamoDB
    dynamodb.Table('myapp-data').put_item(Item={
        'id': record_id,
        'timestamp': datetime.utcnow().isoformat(),
        'data': data
    })

    # Archive a JSON copy of the payload in S3
    payload = json.dumps(data)
    s3.put_object(
        Bucket='myapp-bucket',
        Key=f"data/{record_id}.json",
        Body=payload,
        ContentType='application/json'
    )

    # Broadcast the event via SNS
    sns.publish(
        TopicArn='arn:aws:sns:us-east-1:123456789:myapp-notifications',
        Subject='New data processed',
        Message=payload
    )

    return {'processed': True, 'id': record_id}

def error_response(status_code, message):
    """Build a JSON error response in API Gateway proxy format."""
    payload = {
        'error': message,
        'timestamp': datetime.utcnow().isoformat()
    }
    headers = {
        'Content-Type': 'application/json',
        'Access-Control-Allow-Origin': '*'
    }
    return {
        'statusCode': status_code,
        'headers': headers,
        'body': json.dumps(payload)
    }

Serverless Framework:

# serverless.yml
service: myapp-api

provider:
  name: aws
  runtime: python3.11
  # Stage comes from the CLI (--stage), defaulting to 'dev'
  stage: ${opt:stage, 'dev'}
  region: us-east-1
  memorySize: 512
  timeout: 30

  # Environment variables injected into every function
  environment:
    STAGE: ${self:provider.stage}
    TABLE_NAME: ${self:custom.tableName}

  # IAM permissions shared by all functions
  iam:
    role:
      statements:
        - Effect: Allow
          Action:
            - dynamodb:PutItem
            - dynamodb:GetItem
            - dynamodb:Query
          Resource: "arn:aws:dynamodb:${self:provider.region}:*:table/${self:custom.tableName}"
        - Effect: Allow
          Action:
            - s3:PutObject
            - s3:GetObject
          Resource: "arn:aws:s3:::${self:custom.bucketName}/*"
        - Effect: Allow
          Action:
            - sns:Publish
          # NOTE(review): "*" allows publishing to every topic --
          # consider narrowing this to the specific topic ARN.
          Resource: "*"

custom:
  # Stage-suffixed names keep dev/prod resources separate
  tableName: myapp-data-${self:provider.stage}
  bucketName: myapp-bucket-${self:provider.stage}

functions:
  # HTTP API: POST /process through API Gateway
  api:
    handler: lambda_function.lambda_handler
    events:
      - http:
          path: /process
          method: POST
          cors: true

  # S3 trigger: fires for .jpg objects created under uploads/
  processImage:
    handler: image_processor.handler
    events:
      - s3:
          bucket: ${self:custom.bucketName}
          event: s3:ObjectCreated:*
          rules:
            - prefix: uploads/
            - suffix: .jpg

  # Scheduled (cron)
  dailyReport:
    handler: reports.daily_handler
    events:
      - schedule:
          rate: cron(0 8 * * ? *)  # Every day at 8am UTC
          enabled: true

  # SQS consumer: receives messages in batches of 10
  processQueue:
    handler: queue_processor.handler
    events:
      - sqs:
          arn:
            Fn::GetAtt:
              - ProcessQueue
              - Arn
          batchSize: 10

  # DynamoDB stream consumer: reacts to writes on DataTable
  streamProcessor:
    handler: stream_processor.handler
    events:
      - stream:
          type: dynamodb
          arn:
            Fn::GetAtt:
              - DataTable
              - StreamArn

# Raw CloudFormation resources deployed alongside the functions
resources:
  Resources:
    # DynamoDB table (on-demand billing; the stream feeds the
    # streamProcessor function declared above)
    DataTable:
      Type: AWS::DynamoDB::Table
      Properties:
        TableName: ${self:custom.tableName}
        BillingMode: PAY_PER_REQUEST
        AttributeDefinitions:
          - AttributeName: id
            AttributeType: S
        KeySchema:
          - AttributeName: id
            KeyType: HASH
        StreamSpecification:
          StreamViewType: NEW_AND_OLD_IMAGES

    # SQS queue
    ProcessQueue:
      Type: AWS::SQS::Queue
      Properties:
        QueueName: myapp-process-queue
        # Should be >= the consumer function's timeout (30s here)
        VisibilityTimeout: 300
        MessageRetentionPeriod: 1209600  # 14 days

    # S3 bucket
    AssetsBucket:
      Type: AWS::S3::Bucket
      Properties:
        BucketName: ${self:custom.bucketName}
        CorsConfiguration:
          CorsRules:
            - AllowedOrigins:
                - '*'
              AllowedMethods:
                - GET
                - PUT
                - POST
              AllowedHeaders:
                - '*'

plugins:
  - serverless-python-requirements
  - serverless-offline

# NOTE(review): `package.exclude` is deprecated in Serverless Framework
# v3 in favor of `package.patterns` -- confirm the framework version.
package:
  exclude:
    - node_modules/**
    - venv/**
    - .git/**

Comandos Serverless:

# Deploy the whole service to the given stage
serverless deploy --stage production

# Deploy a single function (much faster than a full deploy)
serverless deploy function -f api

# Invoke locally (no AWS round-trip)
serverless invoke local -f api -d '{"body": "{\"id\": \"123\"}"}'

# Invoke the deployed function
serverless invoke -f api -d '{"body": "{\"id\": \"123\"}"}'

# Tail CloudWatch logs for a function
serverless logs -f api -t

# Remove the entire stack (deletes all resources)
serverless remove

AWS SAM (Serverless Application Model):

# template.yaml
AWSTemplateFormatVersion: '2010-09-09'
Transform: AWS::Serverless-2016-10-31
Description: My Serverless App

# Defaults applied to every function in the template
Globals:
  Function:
    Timeout: 30
    Runtime: python3.11
    MemorySize: 512
    Environment:
      Variables:
        TABLE_NAME: !Ref DataTable

Resources:
  # Explicit API Gateway resource so CORS and auth can be configured
  MyApi:
    Type: AWS::Serverless::Api
    Properties:
      StageName: Prod
      Cors:
        AllowOrigin: "'*'"
        AllowHeaders: "'*'"
      Auth:
        # Clients must send a valid x-api-key header
        ApiKeyRequired: true

  # Lambda function wired to POST /process on the API above
  ProcessFunction:
    Type: AWS::Serverless::Function
    Properties:
      CodeUri: src/
      Handler: lambda_function.lambda_handler
      # SAM policy template scoped to the single table
      Policies:
        - DynamoDBCrudPolicy:
            TableName: !Ref DataTable
      Events:
        ApiEvent:
          Type: Api
          Properties:
            RestApiId: !Ref MyApi
            Path: /process
            Method: POST

  # DynamoDB table (on-demand billing)
  DataTable:
    Type: AWS::DynamoDB::Table
    Properties:
      TableName: myapp-data
      BillingMode: PAY_PER_REQUEST
      AttributeDefinitions:
        - AttributeName: id
          AttributeType: S
      KeySchema:
        - AttributeName: id
          KeyType: HASH

Outputs:
  ApiUrl:
    Description: API Gateway endpoint URL
    Value: !Sub 'https://${MyApi}.execute-api.${AWS::Region}.amazonaws.com/Prod/'

Best Practices Serverless:

# 1. MINIMIZE COLD STARTS
import json
import os

import boto3

# ✅ Initialize outside the handler (reused across warm invocations).
# The original snippet only imported json, so `boto3` and `os` were
# undefined names -- the imports above fix that.
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(os.environ['TABLE_NAME'])

def lambda_handler(event, context):
    # Keep the handler itself thin and fast
    return table.get_item(Key={'id': event['id']})

# 2. USAR LAYERS para dependencias compartidas
# Reduce tamaño del deployment package

# 3. CONFIGURAR RESERVED CONCURRENCY
# Evitar throttling y controlar costos

# 4. IMPLEMENT IDEMPOTENCY
def lambda_handler(event, context):
    # NOTE(review): `table` and `process()` are assumed to be defined
    # at module level (as in snippet 1) -- this fragment is illustrative.
    request_id = event['requestId']

    # Return the cached result if this request was already processed
    existing = table.get_item(Key={'id': request_id})
    if existing.get('Item'):
        return existing['Item']['result']

    # First time we see this request: process and persist the result
    result = process()
    table.put_item(Item={'id': request_id, 'result': result})
    return result

# 5. HANDLE TIMEOUTS
import signal

class LambdaTimeout(Exception):
    """Raised when the alarm fires shortly before Lambda's hard limit.

    Renamed from TimeoutError: the original class shadowed Python's
    builtin TimeoutError, which would also swallow OS-level timeouts.
    """
    pass

def timeout_handler(signum, frame):
    raise LambdaTimeout("Function timeout")

def lambda_handler(event, context):
    # Arm an alarm 5 seconds before Lambda's own limit so we can
    # degrade gracefully instead of being killed mid-flight
    signal.signal(signal.SIGALRM, timeout_handler)
    signal.alarm(context.get_remaining_time_in_millis() // 1000 - 5)

    try:
        return process()
    except LambdaTimeout:
        # Graceful degradation: return whatever partial work we have
        return partial_result()

# 6. STRUCTURE LOGS for CloudWatch Insights
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)

def lambda_handler(event, context):
    """Log structured fields so CloudWatch Insights can query them."""
    logger.info('Event received', extra={
        'event_type': event.get('type'),
        'user_id': event.get('userId'),
        # Fixed: the Lambda context attribute is `aws_request_id`;
        # `context.request_id` does not exist and raised AttributeError.
        'request_id': context.aws_request_id
    })

Versión: 1.0 Fecha: 2024 Autor: Roadmap del Desarrollador del Futuro Licencia: Uso educativo