Deploy and Infrastructure
Take your dashboards from development to production with CI/CD, containers, and cloud infrastructure.
Docker and Containers
Package your application with all of its dependencies
What Docker Is
Docker lets you package your application together with all of its dependencies (Node.js, libraries, configs) into an image that runs identically in every environment: development, staging, and production.
A Dockerfile Optimized for a Dashboard
# Multi-stage build for a minimal image

# Stage 1: Dependencies
FROM node:20-alpine AS deps
WORKDIR /app
COPY package.json pnpm-lock.yaml ./
RUN corepack enable && pnpm install --frozen-lockfile

# Stage 2: Builder
FROM node:20-alpine AS builder
WORKDIR /app
COPY --from=deps /app/node_modules ./node_modules
COPY . .

# Build-time env vars
ARG VITE_API_URL
ARG VITE_ANALYTICS_ID
ENV VITE_API_URL=$VITE_API_URL
ENV VITE_ANALYTICS_ID=$VITE_ANALYTICS_ID

RUN pnpm build

# Stage 3: Runner (production)
FROM nginx:alpine AS runner

# Optimized nginx configuration
COPY nginx.conf /etc/nginx/nginx.conf
COPY --from=builder /app/dist /usr/share/nginx/html

# Security headers and compression
RUN chmod -R 755 /usr/share/nginx/html

# Health check
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
  CMD wget --no-verbose --tries=1 --spider http://localhost/ || exit 1

EXPOSE 80
CMD ["nginx", "-g", "daemon off;"]
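The Dockerfile above copies an nginx.conf that is not reproduced in the material. A minimal sketch of what it might contain (an assumption, not the original file), serving the built SPA with gzip and long-lived caching for hashed assets:

# nginx.conf (sketch -- assumed contents, adjust to your routing and API setup)
worker_processes auto;
events { worker_connections 1024; }
http {
  include      mime.types;
  default_type application/octet-stream;
  sendfile     on;
  gzip         on;
  gzip_types   text/css application/javascript application/json image/svg+xml;

  server {
    listen 80;
    root /usr/share/nginx/html;

    # SPA fallback: unknown routes resolve to index.html
    location / {
      try_files $uri $uri/ /index.html;
    }

    # Hashed build assets can be cached aggressively
    location /assets/ {
      add_header Cache-Control "public, max-age=31536000, immutable";
    }
  }
}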
Docker Compose for Development
# docker-compose.yml
version: '3.8'

services:
  dashboard:
    build:
      context: .
      dockerfile: Dockerfile.dev
    ports:
      - "3000:3000"
    volumes:
      - .:/app
      - /app/node_modules
    environment:
      - VITE_API_URL=http://api:4000
    depends_on:
      - api
      - db
      - redis

  api:
    build: ./api
    ports:
      - "4000:4000"
    environment:
      - DATABASE_URL=postgres://user:pass@db:5432/dashboard
      - REDIS_URL=redis://redis:6379
    depends_on:
      - db
      - redis

  db:
    image: postgres:16-alpine
    environment:
      POSTGRES_USER: user
      POSTGRES_PASSWORD: pass
      POSTGRES_DB: dashboard
    volumes:
      - postgres_data:/var/lib/postgresql/data
    ports:
      - "5432:5432"

  redis:
    image: redis:7-alpine
    ports:
      - "6379:6379"
    volumes:
      - redis_data:/data

  # Metrics service
  prometheus:
    image: prom/prometheus:latest
    ports:
      - "9090:9090"
    volumes:
      - ./prometheus.yml:/etc/prometheus/prometheus.yml

volumes:
  postgres_data:
  redis_data:
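The compose file builds the dashboard from a Dockerfile.dev that is not shown in the material. A minimal sketch (assumed contents) that runs the Vite dev server with the source mounted from the host:

# Dockerfile.dev (sketch -- assumed contents)
FROM node:20-alpine
WORKDIR /app
RUN corepack enable
COPY package.json pnpm-lock.yaml ./
RUN pnpm install --frozen-lockfile
COPY . .
EXPOSE 3000
# --host exposes the dev server outside the container
CMD ["pnpm", "dev", "--host", "0.0.0.0"]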
Optimization Tips
- Alpine images: use the alpine variants (~50 MB vs ~900 MB)
- Layer caching: COPY package.json before COPY . so the dependency layer stays cached
- Multi-stage builds: the final image ships without the build tooling
- .dockerignore: exclude node_modules, .git, and dist from the build context (see the sample below)
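A starting point for that .dockerignore (a sketch; adjust to your repository):

# .dockerignore (sketch)
node_modules
dist
.git
.env*
coverage
*.log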
CI/CD Pipelines
Automate build, test, and deploy on every commit
The CI/CD Flow
CI/CD (Continuous Integration/Continuous Deployment) automates the entire path from commit to production, guaranteeing both quality and delivery speed.
GitHub Actions Pipeline
# .github/workflows/deploy.yml
name: Deploy Dashboard

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

jobs:
  # Job 1: Lint and tests
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: pnpm/action-setup@v2
        with:
          version: 8
      - uses: actions/setup-node@v4
        with:
          node-version: 20
          cache: 'pnpm'
      - run: pnpm install --frozen-lockfile
      - run: pnpm lint
      - run: pnpm type-check
      - run: pnpm test:ci

  # Job 2: Build and push the Docker image
  build:
    needs: test
    runs-on: ubuntu-latest
    if: github.event_name == 'push'
    permissions:
      contents: read
      packages: write
    steps:
      - uses: actions/checkout@v4
      - uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - uses: docker/metadata-action@v5
        id: meta
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=sha,format=long
            type=ref,event=branch
            type=semver,pattern={{version}}
      - uses: docker/build-push-action@v5
        with:
          context: .
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          build-args: |
            VITE_API_URL=${{ vars.API_URL }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

  # Job 3: Deploy
  deploy:
    needs: build
    runs-on: ubuntu-latest
    if: github.ref == 'refs/heads/main'
    environment: production
    steps:
      - name: Deploy to Cloud Run
        uses: google-github-actions/deploy-cloudrun@v2
        with:
          service: dashboard
          region: us-central1
          image: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:sha-${{ github.sha }}
      - name: Notify Slack
        uses: slackapi/slack-github-action@v1
        with:
          payload: |
            {"text": "Dashboard deployed! 🚀"}
CI/CD Platforms
| Platform | Price | Integration | Best For |
|---|---|---|---|
| GitHub Actions | 2,000 free min/month | Native to GitHub | Projects hosted on GitHub |
| GitLab CI | 400 free min/month | Native to GitLab | Self-hosted, enterprise |
| CircleCI | 6,000 free min/month | GitHub/GitLab | Complex builds |
| Vercel/Netlify | Generous free tier | Git providers | Static frontends |
Cloud Providers
AWS, GCP, Azure: choose the right infrastructure
Comparing the Big Three
AWS
- Largest market share (~32%)
- Most services (200+)
- EC2, S3, Lambda, RDS
- Complex, with many options
Azure
- Strong in the enterprise
- Microsoft integration
- Native Active Directory
- App Service, Functions
GCP
- Best developer experience
- BigQuery is excellent
- Cloud Run is simple
- Competitive pricing
Reference Architecture (AWS)
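The reference diagram itself is not reproduced here; in outline, it is what the Terraform below provisions: a VPC with public and private subnets, an Application Load Balancer routing traffic to ECS Fargate tasks in the private subnets, container images pulled from ECR, and logs shipped to CloudWatch.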
Infrastructure as Code (Terraform)
# main.tf - AWS ECS Fargate
terraform {
  required_providers {
    aws = { source = "hashicorp/aws", version = "~> 5.0" }
  }
}

# VPC
module "vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "5.0.0"

  name = "dashboard-vpc"
  cidr = "10.0.0.0/16"

  azs             = ["us-east-1a", "us-east-1b"]
  private_subnets = ["10.0.1.0/24", "10.0.2.0/24"]
  public_subnets  = ["10.0.101.0/24", "10.0.102.0/24"]

  enable_nat_gateway = true
  single_nat_gateway = true
}

# ECS Cluster
resource "aws_ecs_cluster" "main" {
  name = "dashboard-cluster"

  setting {
    name  = "containerInsights"
    value = "enabled"
  }
}

# ECS Service
resource "aws_ecs_service" "dashboard" {
  name            = "dashboard"
  cluster         = aws_ecs_cluster.main.id
  task_definition = aws_ecs_task_definition.dashboard.arn
  desired_count   = 2
  launch_type     = "FARGATE"

  network_configuration {
    subnets         = module.vpc.private_subnets
    security_groups = [aws_security_group.ecs.id]
  }

  load_balancer {
    target_group_arn = aws_lb_target_group.dashboard.arn
    container_name   = "dashboard"
    container_port   = 80
  }
}

# Task Definition
resource "aws_ecs_task_definition" "dashboard" {
  family                   = "dashboard"
  network_mode             = "awsvpc"
  requires_compatibilities = ["FARGATE"]
  cpu                      = 256
  memory                   = 512
  execution_role_arn       = aws_iam_role.ecs_execution.arn

  container_definitions = jsonencode([{
    name         = "dashboard"
    image        = "${aws_ecr_repository.dashboard.repository_url}:latest"
    portMappings = [{ containerPort = 80 }]
    logConfiguration = {
      logDriver = "awslogs"
      options = {
        "awslogs-group"         = "/ecs/dashboard"
        "awslogs-region"        = "us-east-1"
        "awslogs-stream-prefix" = "ecs"
      }
    }
  }])
}
Serverless/Managed Options
- Vercel: zero config, edge functions
- Netlify: similar, great for JAMstack
- Cloudflare Pages: global edge network, generous free tier
- Google Cloud Run: serverless containers
- AWS App Runner: simplified ECS
- Railway/Render: modern PaaS
Kubernetes Essentials
Container orchestration at scale
When to Use Kubernetes
Kubernetes is powerful but adds complexity. Reach for it when you need multiple services, advanced auto-scaling, complex deployment strategies (blue-green, canary), or multi-cloud.
Good fit:
- Many microservices
- A dedicated DevOps team
- Complex scaling requirements
- Multi-cloud/hybrid
Probably overkill:
- Small team
- Simple application/monolith
- Cost is the priority
- Serverless already solves it
Core Concepts
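The concept cards are not reproduced here; the manifests below exercise the main ones: a Deployment that manages replicated Pods, a Service that gives them a stable address, an Ingress that exposes them externally with TLS, and a ConfigMap that injects configuration.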
Kubernetes Manifests
# deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: dashboard
  labels:
    app: dashboard
spec:
  replicas: 3
  selector:
    matchLabels:
      app: dashboard
  template:
    metadata:
      labels:
        app: dashboard
    spec:
      containers:
        - name: dashboard
          image: ghcr.io/empresa/dashboard:v1.2.0
          ports:
            - containerPort: 80
          resources:
            requests:
              memory: "128Mi"
              cpu: "100m"
            limits:
              memory: "256Mi"
              cpu: "200m"
          livenessProbe:
            httpGet:
              path: /health
              port: 80
            initialDelaySeconds: 10
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /ready
              port: 80
            initialDelaySeconds: 5
            periodSeconds: 5
          env:
            - name: API_URL
              valueFrom:
                configMapKeyRef:
                  name: dashboard-config
                  key: api_url
---
apiVersion: v1
kind: Service
metadata:
  name: dashboard
spec:
  selector:
    app: dashboard
  ports:
    - port: 80
      targetPort: 80
  type: ClusterIP
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: dashboard
  annotations:
    kubernetes.io/ingress.class: nginx
    cert-manager.io/cluster-issuer: letsencrypt-prod
spec:
  tls:
    - hosts:
        - dashboard.empresa.com
      secretName: dashboard-tls
  rules:
    - host: dashboard.empresa.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: dashboard
                port:
                  number: 80
Monitoring and Observability
Metrics, logs, and traces to understand your application
The Three Pillars of Observability
Metrics: numbers aggregated over time
- CPU, memory, latency
- Request rate, error rate
- Business metrics
Logs: discrete events with context
- Errors and exceptions
- Audit trail
- Debug info
Traces: the journey of a request
- Request flow
- Latency per service
- Dependencies
Observability Stack
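The stack diagram is not reproduced here; the pieces already in play in this material are Prometheus for metrics (see the docker-compose above) and OpenTelemetry for traces and custom metrics (see the instrumentation code below), typically fronted by a visualization layer such as Grafana.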
Essential Alerts
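The original alert list is not reproduced here. As a sketch (metric names and thresholds are assumptions; adjust to what your services actually export), Prometheus alerting rules for error rate and p95 latency could look like this:

# alert-rules.yml (sketch -- metric names and thresholds are assumptions)
groups:
  - name: dashboard
    rules:
      - alert: HighErrorRate
        expr: |
          sum(rate(http_requests_total{status=~"5.."}[5m]))
            / sum(rate(http_requests_total[5m])) > 0.05
        for: 5m
        labels:
          severity: critical
        annotations:
          summary: "More than 5% of requests are failing"

      - alert: HighLatencyP95
        expr: |
          histogram_quantile(0.95,
            sum(rate(http_request_duration_seconds_bucket[5m])) by (le)) > 1
        for: 10m
        labels:
          severity: warning
        annotations:
          summary: "p95 latency above 1s for 10 minutes"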
Instrumentation with OpenTelemetry
// instrumentation.ts
import { NodeSDK } from '@opentelemetry/sdk-node';
import { getNodeAutoInstrumentations } from '@opentelemetry/auto-instrumentations-node';
import { OTLPTraceExporter } from '@opentelemetry/exporter-trace-otlp-http';
import { OTLPMetricExporter } from '@opentelemetry/exporter-metrics-otlp-http';
import { PeriodicExportingMetricReader } from '@opentelemetry/sdk-metrics';

const sdk = new NodeSDK({
  serviceName: 'dashboard-api',
  traceExporter: new OTLPTraceExporter({
    url: 'http://otel-collector:4318/v1/traces',
  }),
  metricReader: new PeriodicExportingMetricReader({
    exporter: new OTLPMetricExporter({
      url: 'http://otel-collector:4318/v1/metrics',
    }),
    exportIntervalMillis: 30000,
  }),
  instrumentations: [getNodeAutoInstrumentations()],
});

sdk.start();

// Custom metrics
import { metrics } from '@opentelemetry/api';

const meter = metrics.getMeter('dashboard');

const requestCounter = meter.createCounter('dashboard.requests', {
  description: 'Total dashboard requests',
});

const latencyHistogram = meter.createHistogram('dashboard.latency', {
  description: 'Request latency',
  unit: 'ms',
});

// Usage
export function trackRequest(route: string, duration: number) {
  requestCounter.add(1, { route });
  latencyHistogram.record(duration, { route });
}
Scalability
Get your application ready to grow
Vertical vs Horizontal Scaling
Vertical: increase the machine's resources (CPU, RAM)
- Pro: simple, no code changes
- Con: physical ceiling, single point of failure
- Good for: databases
Horizontal: add more instances
- Pro: no theoretical limit, high availability
- Con: requires a stateless application and a load balancer
- Good for: APIs, frontends
Auto Scaling
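The original auto-scaling content is not reproduced here; as one concrete example tied to the Deployment from the Kubernetes section, a HorizontalPodAutoscaler that scales the dashboard on CPU usage (a sketch, with assumed thresholds):

# hpa.yaml (sketch -- targets the Deployment defined earlier)
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: dashboard
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: dashboard
  minReplicas: 2
  maxReplicas: 10
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 70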
Caching Strategies
- CDN: static assets (JS, CSS, images) cached globally. CloudFront, Cloudflare.
- Application cache (Redis): hot data, sessions, query results. TTLs from seconds to hours (see the sketch after this list).
- Database: repeated queries cached; materialized views for aggregations.
- Browser: service workers and HTTP cache headers. Cache-Control, ETag.
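A minimal cache-aside sketch for the application layer, assuming the redis service from the docker-compose above and the node-redis client; getDashboardMetrics is a hypothetical loader, not from the original material:

// cache.ts -- cache-aside sketch (assumes node-redis v4; loader names are hypothetical)
import { createClient } from 'redis';

const redis = createClient({ url: process.env.REDIS_URL ?? 'redis://redis:6379' });
await redis.connect();

// Wrap any expensive loader with a TTL-based cache
export async function cached<T>(
  key: string,
  ttlSeconds: number,
  loader: () => Promise<T>,
): Promise<T> {
  const hit = await redis.get(key);
  if (hit) return JSON.parse(hit) as T;

  const value = await loader();
  await redis.set(key, JSON.stringify(value), { EX: ttlSeconds });
  return value;
}

// Example: cache a hypothetical metrics query for 60 seconds
// const metrics = await cached('dashboard:metrics', 60, () => getDashboardMetrics());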
Production Checklist
Security
- [ ] HTTPS everywhere
- [ ] Security headers (CSP, HSTS)
- [ ] Secrets in a vault/env vars
- [ ] WAF configured
Resilience
- [ ] Health checks configured
- [ ] Graceful shutdown (see the sketch after this checklist)
- [ ] Retries with backoff
- [ ] Circuit breakers
Observability
- [ ] Structured logs
- [ ] Business metrics
- [ ] Alerts configured
- [ ] SRE dashboards
Disaster recovery
- [ ] Automated backups
- [ ] Multi-AZ/region
- [ ] RTO/RPO defined
- [ ] Incident runbook
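A minimal graceful-shutdown sketch for a Node service behind the health checks above (the plain http server is illustrative; adapt to your framework):

// shutdown.ts -- graceful shutdown sketch (server setup is illustrative)
import http from 'node:http';

const server = http.createServer((_req, res) => {
  res.end('ok');
});
server.listen(4000);

// Docker and Kubernetes send SIGTERM before killing the container:
// stop accepting new connections and let in-flight requests finish.
process.on('SIGTERM', () => {
  console.log('SIGTERM received, draining connections...');
  server.close(() => {
    // Close database/Redis connections here before exiting
    process.exit(0);
  });

  // Safety net: force-exit if draining takes too long
  setTimeout(() => process.exit(1), 10_000).unref();
});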