TechLead
Lesson 3 of 22
5 min read
Performance Engineering

Lighthouse & PageSpeed Insights

Use Lighthouse programmatically, interpret scores, and automate performance audits in CI/CD pipelines

What Is Lighthouse?

Lighthouse is an open-source, automated tool from Google for auditing web page quality. It runs a series of tests against a page and generates a report with scores for performance, accessibility, best practices, SEO, and progressive web app readiness. Lighthouse can be run in Chrome DevTools, from the command line, or as a Node module.

Lighthouse Performance Score Weights

  • Total Blocking Time (TBT): 30% — Sum of the main-thread blocking time (the portion of each long task beyond 50 ms); a lab proxy for responsiveness (INP)
  • Largest Contentful Paint (LCP): 25% — When the main content loads
  • Cumulative Layout Shift (CLS): 25% — Visual stability score
  • First Contentful Paint (FCP): 10% — When the first content appears
  • Speed Index: 10% — How quickly content is visually populated

Running Lighthouse Programmatically

// lighthouse-audit.ts — Run Lighthouse from Node.js
import lighthouse from 'lighthouse';
import * as chromeLauncher from 'chrome-launcher';

// Summarized result of a single Lighthouse run, as produced by runLighthouseAudit.
interface AuditResult {
  url: string; // the audited page URL, echoed back for reporting
  scores: Record<string, number>; // category scores scaled to 0-100
  metrics: Record<string, number>; // raw metric values — milliseconds, except CLS (unitless)
  opportunities: Array<{ title: string; savings: string }>; // improvement suggestions with estimated savings
}

/**
 * Run a mobile-throttled Lighthouse performance audit against `url` and
 * return the category score, key metric values, and improvement opportunities.
 *
 * @param url Fully-qualified URL of the page to audit.
 * @returns Summary of the Lighthouse report (score scaled 0-100, metrics in ms except CLS).
 * @throws Error when Lighthouse produces no report.
 */
async function runLighthouseAudit(url: string): Promise<AuditResult> {
  // Launch Chrome in headless mode; --no-sandbox is required in most CI containers.
  const chrome = await chromeLauncher.launch({
    chromeFlags: ['--headless', '--no-sandbox', '--disable-gpu'],
  });

  const options = {
    logLevel: 'info' as const,
    output: 'json' as const,
    port: chrome.port,
    onlyCategories: ['performance'],
    formFactor: 'mobile' as const,
    // Simulated throttling approximating Slow 4G on a mid-tier device.
    throttling: {
      rttMs: 150,          // Round-trip time
      throughputKbps: 1638, // Download speed (Slow 4G)
      cpuSlowdownMultiplier: 4,
      requestLatencyMs: 0,
      downloadThroughputKbps: 0,
      uploadThroughputKbps: 0,
    },
    screenEmulation: {
      mobile: true,
      width: 412,
      height: 823,
      deviceScaleFactor: 1.75,
      disabled: false,
    },
  };

  let result;
  try {
    result = await lighthouse(url, options);
  } finally {
    // Always shut Chrome down, even when the audit throws — otherwise the
    // headless process leaks (and accumulates across CI runs).
    await chrome.kill();
  }

  if (!result || !result.lhr) {
    throw new Error('Lighthouse audit failed');
  }

  const { lhr } = result;

  return {
    url,
    // Lighthouse reports category scores in the 0-1 range; scale to 0-100.
    scores: {
      performance: (lhr.categories.performance?.score ?? 0) * 100,
    },
    // numericValue is in milliseconds for timing metrics; CLS is unitless.
    metrics: {
      FCP: lhr.audits['first-contentful-paint']?.numericValue ?? 0,
      LCP: lhr.audits['largest-contentful-paint']?.numericValue ?? 0,
      TBT: lhr.audits['total-blocking-time']?.numericValue ?? 0,
      CLS: lhr.audits['cumulative-layout-shift']?.numericValue ?? 0,
      SI: lhr.audits['speed-index']?.numericValue ?? 0,
      TTI: lhr.audits['interactive']?.numericValue ?? 0,
    },
    // Collect "opportunity" audits that report a positive estimated saving.
    opportunities: Object.values(lhr.audits)
      .filter((audit: any) => audit.details?.type === 'opportunity' && audit.details?.overallSavingsMs > 0)
      .map((audit: any) => ({
        title: audit.title,
        savings: `${audit.details.overallSavingsMs}ms`,
      })),
  };
}

// Example: audit one page and print the headline numbers.
const audit = await runLighthouseAudit('https://example.com');
console.log('Performance Score:', audit.scores.performance);
console.log('LCP:', audit.metrics.LCP, 'ms');
console.log('Opportunities:', audit.opportunities);

Automating Lighthouse in CI/CD

Running Lighthouse in your CI/CD pipeline ensures performance regressions are caught before deployment. You can use Lighthouse CI (LHCI) to automate audits, compare against budgets, and store historical results.

// lighthouserc.js — Lighthouse CI configuration: collect → assert → upload.
module.exports = {
  ci: {
    collect: {
      // Audit the routes that matter most (landing, catalog, checkout).
      url: [
        'http://localhost:3000/',
        'http://localhost:3000/products',
        'http://localhost:3000/checkout',
      ],
      startServerCommand: 'npm run start',
      // LHCI waits for this pattern in the server's stdout before auditing.
      startServerReadyPattern: 'ready on',
      numberOfRuns: 3, // Run 3 times for median
      settings: {
        formFactor: 'mobile',
        // Same Slow 4G / 4x CPU throttling as the programmatic example above.
        throttling: {
          rttMs: 150,
          throughputKbps: 1638,
          cpuSlowdownMultiplier: 4,
        },
      },
    },
    assert: {
      // 'error' level fails the build; 'warn' only reports.
      // Timing thresholds are milliseconds; CLS is unitless; minScore is 0-1.
      assertions: {
        'categories:performance': ['error', { minScore: 0.9 }],
        'first-contentful-paint': ['warn', { maxNumericValue: 1800 }],
        'largest-contentful-paint': ['error', { maxNumericValue: 2500 }],
        'cumulative-layout-shift': ['error', { maxNumericValue: 0.1 }],
        'total-blocking-time': ['warn', { maxNumericValue: 300 }],
        'interactive': ['warn', { maxNumericValue: 3800 }],
      },
    },
    upload: {
      target: 'temporary-public-storage', // or 'lhci' for self-hosted
    },
  },
};
// .github/workflows/lighthouse.yml — GitHub Actions integration
// name: Lighthouse CI
// on: [push, pull_request]
// jobs:
//   lighthouse:
//     runs-on: ubuntu-latest
//     steps:
//       - uses: actions/checkout@v4
//       - uses: actions/setup-node@v4
//         with:
//           node-version: 20
//       - run: npm ci
//       - run: npm run build
//       - name: Run Lighthouse CI
//         run: |
//           npm install -g @lhci/cli@0.13.x
//           lhci autorun
//         env:
//           LHCI_GITHUB_APP_TOKEN: ${{ secrets.LHCI_GITHUB_APP_TOKEN }}

// Custom Lighthouse audit script for CI
import { execSync } from 'child_process';
import * as fs from 'fs';

// Outcome of comparing one Lighthouse metric against its budget.
interface BudgetCheck {
  metric: string;
  actual: number;
  budget: number;
  passed: boolean;
}

// Read a saved Lighthouse JSON report and evaluate each audited metric
// against a fixed performance budget. Missing audits default to 0.
function checkPerformanceBudget(reportPath: string): BudgetCheck[] {
  // Budgets: milliseconds for the timing metrics, unitless for CLS.
  const budgets: Record<string, number> = {
    'first-contentful-paint': 1800,
    'largest-contentful-paint': 2500,
    'total-blocking-time': 300,
    'cumulative-layout-shift': 0.1,
    'speed-index': 3400,
  };

  const report = JSON.parse(fs.readFileSync(reportPath, 'utf-8'));
  const audits = report.audits;

  const results: BudgetCheck[] = [];
  for (const [metric, budget] of Object.entries(budgets)) {
    const actual = audits[metric]?.numericValue ?? 0;
    results.push({ metric, actual, budget, passed: actual <= budget });
  }
  return results;
}

// Usage in CI: evaluate the report and exit non-zero on any budget breach.
const checks = checkPerformanceBudget('./lighthouse-report.json');
const failures = checks.filter((check) => !check.passed);

if (failures.length > 0) {
  console.error('Performance budget exceeded:');
  for (const failure of failures) {
    console.error(`  ${failure.metric}: ${failure.actual} (budget: ${failure.budget})`);
  }
  process.exit(1);
}

PageSpeed Insights API

The PageSpeed Insights API provides both lab data (from Lighthouse) and field data (from the Chrome UX Report). Field data is especially valuable because it reflects real user experiences across diverse devices and network conditions.

// Querying the PageSpeed Insights API
// Subset of the PSI v5 response consumed below: CrUX field data + Lighthouse lab data.
interface PSIResponse {
  loadingExperience: {
    // Keyed by metric name (presumably e.g. LARGEST_CONTENTFUL_PAINT_MS — confirm against API docs).
    metrics: Record<string, {
      percentile: number; // p75 value observed across real users
      category: string; // speed bucket label (e.g. 'FAST' — verify against PSI API docs)
    }>;
  };
  lighthouseResult: {
    categories: Record<string, { score: number }>; // scores are in the 0-1 range
    audits: Record<string, { numericValue: number; displayValue: string }>;
  };
}

/**
 * Query the PageSpeed Insights v5 API for a URL and print both field data
 * (CrUX, real users) and lab data (Lighthouse) to the console.
 *
 * @param url      Page to analyze.
 * @param strategy 'mobile' (default) or 'desktop' — selects which Lighthouse config PSI runs.
 * @throws Error when the API responds with a non-2xx status.
 */
async function getPageSpeedInsights(
  url: string,
  strategy: 'mobile' | 'desktop' = 'mobile'
): Promise<void> {
  const apiKey = process.env.PSI_API_KEY;
  // Only append the key parameter when one is configured — otherwise the
  // original code sent a literal `key=undefined`, which the API rejects.
  const keyParam = apiKey ? `&key=${apiKey}` : '';
  const apiUrl = `https://www.googleapis.com/pagespeedonline/v5/runPagespeed?url=${encodeURIComponent(url)}&strategy=${strategy}${keyParam}`;

  const response = await fetch(apiUrl);
  if (!response.ok) {
    // Fail loudly instead of trying to parse an error page as PSIResponse.
    throw new Error(`PageSpeed Insights request failed: ${response.status} ${response.statusText}`);
  }
  const data: PSIResponse = await response.json();

  // Field data (CrUX) — may be absent for low-traffic pages, so guard it.
  const fieldMetrics = data.loadingExperience?.metrics;
  if (fieldMetrics) {
    console.log('Field Data (Real Users):');
    // Note: renamed callback param (was `data`, shadowing the outer response).
    Object.entries(fieldMetrics).forEach(([metric, value]) => {
      console.log(`  ${metric}: p75=${value.percentile} (${value.category})`);
    });
  } else {
    console.log('Field Data (Real Users): not available for this URL');
  }

  // Lab data (Lighthouse) — score is 0-1; scale to 0-100 for display.
  const labScore = data.lighthouseResult.categories.performance.score * 100;
  console.log(`\nLab Score: ${labScore}`);

  const audits = data.lighthouseResult.audits;
  console.log('Lab Metrics:');
  console.log(`  FCP: ${audits['first-contentful-paint'].displayValue}`);
  console.log(`  LCP: ${audits['largest-contentful-paint'].displayValue}`);
  console.log(`  TBT: ${audits['total-blocking-time'].displayValue}`);
  console.log(`  CLS: ${audits['cumulative-layout-shift'].displayValue}`);
}

Key Takeaways

  • Automate audits: Run Lighthouse in CI to catch regressions before they ship
  • Set budgets: Define clear thresholds and fail builds when they are exceeded
  • Field vs lab: Use PageSpeed Insights for field data; Lighthouse for reproducible lab tests
  • Track over time: Store results historically to spot trends and regressions

Continue Learning