| 指标 | 含义 | 健康范围 | 告警阈值 |
|---|---|---|---|
| Event Loop Lag | 事件循环延迟 | < 10ms | > 100ms |
| Callback Duration | 单个回调执行时间 | < 1ms | > 10ms |
| Request Latency | 请求响应延迟 | < 50ms | > 500ms |
| Throughput | 吞吐量(请求/秒) | 依场景 | 低于基线 50% |
| Memory Usage | 内存使用量 | 稳定 | 持续增长 |
| Active Handles | 活跃句柄数 | 稳定 | 持续增长 |
// Simple event-loop lag probe: a 1-second timer should fire every
// ~1000ms; any extra delay is time the loop spent blocked.
let lastTime = Date.now();
setInterval(() => {
  const now = Date.now();
  // How far past the expected 1000ms tick this callback actually ran.
  const lag = now - lastTime - 1000;
  lastTime = now;
  if (lag > 50) {
    console.warn(`Event loop lag: ${lag}ms`);
  }
}, 1000);
// Higher-fidelity lag measurement using the built-in perf_hooks
// event-loop-delay histogram.
const { monitorEventLoopDelay } = require("perf_hooks");
// resolution: sampling interval in milliseconds.
const histogram = monitorEventLoopDelay({ resolution: 20 });
histogram.enable();
setInterval(() => {
  // Histogram values are nanoseconds; divide by 1e6 for milliseconds.
  console.log({
    min: histogram.min / 1e6,
    max: histogram.max / 1e6,
    mean: histogram.mean / 1e6,
    p50: histogram.percentile(50) / 1e6,
    p99: histogram.percentile(99) / 1e6,
  });
  // Reset so each 5-second report covers only its own window.
  histogram.reset();
}, 5000);
// blocked-at reports the stack trace of the operation that *started*
// the blocking work, not merely that blocking occurred.
const blocked = require("blocked-at");
blocked(
  (time, stack) => {
    console.log(`Blocked for ${time}ms, operation started here:`, stack);
  },
  // Only report pauses longer than 50ms.
  { threshold: 50 }
);
const { Worker, isMainThread, parentPort } = require("worker_threads");
// CPU-bound comparison sort: runs entirely on the main thread, so a
// large input blocks the event loop for the whole sort (the "bad"
// baseline the worker/chunked versions below improve on).
function blockingSort(data) {
  const byComplexOrder = (a, b) => complexCompare(a, b);
  return data.sort(byComplexOrder);
}
/**
 * Sorts `data` numerically off the main thread in a one-shot worker.
 * The worker source is evaluated as CommonJS (eval: true), receives the
 * array via workerData, and posts the sorted result back.
 *
 * @param {number[]} data - Array to sort (structured-cloned to the worker).
 * @returns {Promise<number[]>} resolves with the sorted array.
 */
function sortInWorker(data) {
  return new Promise((resolve, reject) => {
    const worker = new Worker(
      `
      const { parentPort, workerData } = require('worker_threads');
      const sorted = workerData.sort((a, b) => a - b);
      parentPort.postMessage(sorted);
      `,
      {
        eval: true,
        workerData: data,
      }
    );
    worker.on("message", resolve);
    worker.on("error", reject);
    // Fix: without an "exit" handler, a worker that dies before posting
    // a message (e.g. killed or OOM) left the promise pending forever.
    worker.on("exit", (code) => {
      if (code !== 0) {
        reject(new Error(`Worker stopped with exit code ${code}`));
      }
    });
  });
}
// Cooperative chunked sort: sorts the input in fixed-size slices,
// yielding to the event loop (via setImmediate) between slices so
// pending I/O callbacks can run, then merges the sorted slices.
async function nonBlockingSort(data, chunkSize = 1000) {
  const chunks = [];
  let offset = 0;
  while (offset < data.length) {
    const slice = data.slice(offset, offset + chunkSize);
    chunks.push(slice.sort((a, b) => a - b));
    offset += chunkSize;
    // Yield one macrotask turn before sorting the next slice.
    await new Promise((resolve) => setImmediate(resolve));
  }
  return mergeChunks(chunks);
}
// Anti-pattern example: an unbounded cache. Every process() call adds
// an entry and nothing ever evicts, so memory grows for the life of
// the process (compare with the bounded LRUCache below).
class DataProcessor {
  constructor() {
    this.cache = new Map();
  }
  process(data) {
    // Keyed by id; new ids grow the Map forever.
    this.cache.set(data.id, data);
  }
}
/**
 * Bounded LRU cache built on Map's insertion-order iteration:
 * re-inserting a key moves it to the back, so the first key in
 * iteration order is always the least recently used.
 */
class LRUCache {
  constructor(maxSize = 1000) {
    this.maxSize = maxSize;
    this.cache = new Map();
  }

  /** Returns the value and marks it most-recently-used; null if absent. */
  get(key) {
    if (!this.cache.has(key)) return null;
    const value = this.cache.get(key);
    // Delete + re-insert refreshes the key's recency.
    this.cache.delete(key);
    this.cache.set(key, value);
    return value;
  }

  /** Inserts/updates a key, evicting the LRU entry when at capacity. */
  set(key, value) {
    if (this.cache.has(key)) {
      this.cache.delete(key);
    } else if (this.cache.size >= this.maxSize) {
      // Oldest (least recently used) entry is first in iteration order.
      const oldest = this.cache.keys().next().value;
      this.cache.delete(oldest);
    }
    this.cache.set(key, value);
  }
}
/**
 * Cache that holds its values weakly: entries never keep their objects
 * alive, and a FinalizationRegistry removes the map slot some time
 * after an object has been garbage-collected.
 */
class ObjectCache {
  constructor() {
    this.cache = new Map();
    // Invoked (eventually, after GC) with the key we registered below.
    this.registry = new FinalizationRegistry((key) => {
      this.cache.delete(key);
    });
  }

  set(key, value) {
    this.cache.set(key, new WeakRef(value));
    this.registry.register(value, key);
  }

  get(key) {
    const ref = this.cache.get(key);
    if (!ref) return null;
    const value = ref.deref();
    if (value) return value;
    // Object was collected but the registry callback has not run yet:
    // clean up the stale slot eagerly.
    this.cache.delete(key);
    return null;
  }
}
const fs = require("fs");
const { pipeline, Transform } = require("stream");
// Anti-pattern: loads and parses the entire file per request.
// JSON.parse of a large buffer is synchronous and blocks the event
// loop for every concurrent request.
app.get("/data", async (req, res) => {
  const data = await fs.promises.readFile("large-file.json");
  const parsed = JSON.parse(data); // synchronous, O(file size)
  const filtered = parsed.filter((item) => item.active);
  res.json(filtered);
});
const { createReadStream } = require("fs");
const JSONStream = require("JSONStream");
// Streaming version: parses and filters items incrementally so memory
// stays flat regardless of file size.
app.get("/data", (req, res) => {
  res.setHeader("Content-Type", "application/json");
  // Emits one newline-delimited JSON line per active item.
  const filterActive = new Transform({
    objectMode: true,
    transform(item, encoding, callback) {
      if (item.active) {
        callback(null, JSON.stringify(item) + "\n");
      } else {
        callback(); // drop inactive items
      }
    },
  });
  // Fix: pass every stage to pipeline() instead of pre-chaining with
  // .pipe(). The original piped read->parse->transform manually and
  // only handed the tail to pipeline(), so errors in the read/parse
  // stages bypassed the error callback and leaked the file descriptor.
  pipeline(
    createReadStream("large-file.json"),
    JSONStream.parse("*"),
    filterActive,
    res,
    (err) => {
      if (err) console.error("Pipeline failed:", err);
    }
  );
});
# Swap in uvloop's libuv-based event loop implementation.
import uvloop
import asyncio
# NOTE(review): the policy must be set before any event loop is created
# for it to take effect — verify placement in the real application.
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
import asyncio
import aiohttp
# Anti-pattern: the three fetches are independent but awaited one after
# another, so total latency is the *sum* of all three round trips.
async def bad_example():
    result1 = await fetch("url1")
    result2 = await fetch("url2")
    result3 = await fetch("url3")
async def good_example():
    """Run the three independent fetches concurrently.

    Total latency is the slowest single fetch rather than the sum.
    """
    result1, result2, result3 = await asyncio.gather(
        fetch("url1"), fetch("url2"), fetch("url3")
    )
async def controlled_concurrency(urls: list, max_concurrent: int = 10):
    """Fetch every URL concurrently, but cap in-flight requests.

    A semaphore admits at most ``max_concurrent`` fetches at a time;
    results are returned in input order.
    """
    gate = asyncio.Semaphore(max_concurrent)

    async def bounded_fetch(url):
        # Each fetch waits for a slot before starting.
        async with gate:
            return await fetch(url)

    return await asyncio.gather(*(bounded_fetch(u) for u in urls))
import asyncio
import time
# Anti-pattern: every statement below is synchronous and stalls the
# entire event loop while it runs.
async def bad_sync_io():
    time.sleep(1)  # blocks the loop for a full second
    import requests
    response = requests.get("http://example.com")  # blocking socket I/O
    with open("file.txt", "r") as f:  # blocking file I/O
        data = f.read()
# Async equivalents: each await yields control back to the loop while
# the underlying operation is in flight.
async def good_async_io():
    await asyncio.sleep(1)
    async with aiohttp.ClientSession() as session:
        async with session.get("http://example.com") as response:
            data = await response.text()
    import aiofiles
    async with aiofiles.open("file.txt", "r") as f:
        data = await f.read()
async def mixed_io():
    """Run a blocking function on a thread pool so the loop stays free.

    Returns the blocking function's result. Fix: the original awaited
    the executor but silently discarded the computed value.
    """
    loop = asyncio.get_running_loop()
    result = await loop.run_in_executor(
        None,  # None selects the loop's default ThreadPoolExecutor
        synchronous_blocking_function
    )
    return result
import aiohttp
import asyncio
# Anti-pattern: a brand-new ClientSession (and connection pool) per URL
# means zero connection reuse, plus DNS/TCP/TLS setup cost on every
# single request.
async def bad_requests(urls):
    for url in urls:
        async with aiohttp.ClientSession() as session:
            async with session.get(url) as response:
                pass
async def good_requests(urls):
    """Fetch all URLs through one shared, bounded connection pool.

    Returns the response bodies (bytes) in input order. Fix: the
    original fired ``session.get`` without a context manager, so pooled
    connections were never released, and the responses were discarded.
    """
    connector = aiohttp.TCPConnector(
        limit=100,           # total concurrent connections
        limit_per_host=10,   # per-host cap
        ttl_dns_cache=300,   # seconds to cache DNS lookups
        enable_cleanup_closed=True,
    )
    async with aiohttp.ClientSession(connector=connector) as session:

        async def _fetch(url):
            # Context manager guarantees the connection returns to the pool.
            async with session.get(url) as response:
                return await response.read()

        return await asyncio.gather(*(_fetch(u) for u in urls))
# Start Node with the V8 inspector attached (then open chrome://inspect).
node --inspect app.js
# ndb: Chrome-based debugger with improved async stack traces.
ndb app.js
// Trace the lifecycle of every async resource in the process.
// fs.writeSync(1, ...) is used instead of console.log because
// console.log is itself asynchronous and would re-enter these hooks.
const async_hooks = require("async_hooks");
const fs = require("fs");
const hook = async_hooks.createHook({
  init(asyncId, type, triggerAsyncId) {
    // A new async resource was created (type e.g. Timeout, TCPWRAP).
    fs.writeSync(1, `INIT: ${type}(${asyncId}) trigger: ${triggerAsyncId}\n`);
  },
  before(asyncId) {
    // About to run this resource's callback.
    fs.writeSync(1, `BEFORE: ${asyncId}\n`);
  },
  after(asyncId) {
    // The callback has returned.
    fs.writeSync(1, `AFTER: ${asyncId}\n`);
  },
  destroy(asyncId) {
    // The resource has been torn down.
    fs.writeSync(1, `DESTROY: ${asyncId}\n`);
  },
});
hook.enable();
const v8 = require("v8");
const fs = require("fs");

/** Writes a V8 heap snapshot to disk and logs the generated filename. */
function takeHeapSnapshot() {
  // v8.writeHeapSnapshot() returns the path of the file it wrote.
  const snapshotPath = v8.writeHeapSnapshot();
  console.log(`Heap snapshot written to: ${snapshotPath}`);
}
// Periodic memory report, converted from bytes to MB:
//   rss       - total OS-resident memory for the process
//   heapTotal - memory reserved for the V8 heap
//   heapUsed  - heap actually occupied by JS objects
//   external  - memory of C++ objects bound to JS (Buffers etc.)
setInterval(() => {
  const usage = process.memoryUsage();
  console.log({
    rss: `${(usage.rss / 1024 / 1024).toFixed(2)} MB`,
    heapTotal: `${(usage.heapTotal / 1024 / 1024).toFixed(2)} MB`,
    heapUsed: `${(usage.heapUsed / 1024 / 1024).toFixed(2)} MB`,
    external: `${(usage.external / 1024 / 1024).toFixed(2)} MB`,
  });
}, 10000);
// Leak tripwire: if heapUsed grows by more than 10MB between 30-second
// samples, warn and capture a snapshot for offline diffing.
let prevHeapUsed = 0;
setInterval(() => {
  const { heapUsed } = process.memoryUsage();
  const diff = heapUsed - prevHeapUsed;
  if (diff > 10 * 1024 * 1024) {
    console.warn(`Memory increased by ${(diff / 1024 / 1024).toFixed(2)} MB`);
    takeHeapSnapshot();
  }
  prevHeapUsed = heapUsed;
}, 30000);
import asyncio
import traceback
import sys
# debug=True enables asyncio debug mode: warnings for slow callbacks,
# never-awaited coroutines, and richer exception context.
asyncio.run(main(), debug=True)
def exception_handler(loop, context):
    """Global handler for exceptions that escape the event loop.

    ``context`` always carries a ``message``; ``exception`` is present
    only when an actual exception object was raised.
    """
    exception = context.get('exception')
    message = context.get('message', 'Unknown error')
    print(f"Event loop exception: {message}")
    # Guard clause: nothing more to report without an exception object.
    if not exception:
        return
    traceback.print_exception(
        type(exception), exception, exception.__traceback__
    )
loop = asyncio.new_event_loop()
loop.set_exception_handler(exception_handler)
import time
class SlowCallbackDetector:
    """Runs an asyncio handle's callback and reports slow executions.

    NOTE(review): relies on private Handle attributes (``_run``,
    ``_callback``, ``_source_traceback``), so it is coupled to
    CPython's asyncio internals — confirm against the target version.
    """

    def __init__(self, threshold_ms: float = 100):
        self.threshold_ms = threshold_ms

    def __call__(self, handle):
        """Execute the callback, timing it; log if over the threshold."""
        started = time.monotonic()
        handle._run()
        elapsed = (time.monotonic() - started) * 1000
        if elapsed <= self.threshold_ms:
            return
        print(f"Slow callback detected: {elapsed:.2f}ms")
        print(f"  Callback: {handle._callback}")
        # _source_traceback is populated only in asyncio debug mode.
        if handle._source_traceback:
            print(f"  Created at:")
            for line in handle._source_traceback:
                print(f"    {line}")
const promClient = require("prom-client");
// NOTE(review): the `timeout` option to collectDefaultMetrics was
// deprecated/removed in newer prom-client versions (default metrics
// are computed at scrape time) — verify against the installed version.
promClient.collectDefaultMetrics({ timeout: 5000 });
// Histogram of observed event-loop lag, in seconds.
const eventLoopLag = new promClient.Histogram({
  name: "event_loop_lag_seconds",
  help: "Event loop lag in seconds",
  buckets: [0.001, 0.005, 0.01, 0.025, 0.05, 0.1, 0.5, 1],
});
// Per-request latency, labelled by method/path/status.
const httpDuration = new promClient.Histogram({
  name: "http_request_duration_seconds",
  help: "HTTP request duration in seconds",
  labelNames: ["method", "path", "status"],
  buckets: [0.01, 0.05, 0.1, 0.5, 1, 5],
});
// Current open-connection count (set elsewhere).
const activeConnections = new promClient.Gauge({
  name: "active_connections",
  help: "Number of active connections",
});
/**
 * Express middleware recording each request's wall-clock duration into
 * the httpDuration histogram, labelled by method, path and status.
 */
function metricsMiddleware(req, res, next) {
  const startedAt = process.hrtime.bigint();
  res.on("finish", () => {
    // hrtime.bigint() is nanoseconds; convert to seconds for Prometheus.
    const elapsedSeconds = Number(process.hrtime.bigint() - startedAt) / 1e9;
    const labels = {
      method: req.method,
      path: req.path,
      status: res.statusCode,
    };
    httpDuration.observe(labels, elapsedSeconds);
  });
  next();
}
// Prometheus scrape endpoint: serialize the whole registry.
app.get("/metrics", async (req, res) => {
  res.set("Content-Type", promClient.register.contentType);
  res.end(await promClient.register.metrics());
});
const emitter = new EventEmitter();
// Anti-pattern: raising the listener cap to unlimited silences the
// "possible EventEmitter memory leak" warning instead of fixing it.
emitter.setMaxListeners(0);
// Anti-pattern: every call registers a fresh anonymous listener that
// can never be individually removed.
function addListener() {
  emitter.on("data", (data) => {
  });
}
// Fixed: keep a reference to the handler and return an unsubscribe
// function so callers can detach it.
function fixedAddListener() {
  const handler = (data) => {
  };
  emitter.on("data", handler);
  return () => emitter.off("data", handler);
}
// Closure-retention example: the returned function's scope can keep
// largeData reachable (engine-dependent), pinning ~1M array slots.
function createClosure() {
  const largeData = new Array(1000000).fill("x");
  return function () {
    console.log("hello");
  };
}
// Fix: derive the small result before returning, so the closure only
// captures `result` and largeData can be collected.
function fixedClosure() {
  const largeData = new Array(1000000).fill("x");
  const result = processData(largeData);
  return function () {
    console.log(result);
  };
}
// Anti-pattern: bare module-level object used as an unbounded cache.
const cache = {};
// Better: a Map paired with an explicit size cap for eviction.
const boundedCache = new Map();
const MAX_CACHE_SIZE = 1000;
| 工具 | 语言 | 特点 | 适用场景 |
|---|---|---|---|
| wrk | C | 多线程,高性能 | HTTP 基准测试 |
| autocannon | Node.js | 原生支持,管道模式 | Node.js 服务压测 |
| locust | Python | 分布式,脚本灵活 | 复杂场景压测 |
| k6 | Go | 现代化,支持 JS 脚本 | 云原生压测 |
| ab | C | Apache 自带,简单 | 快速测试 |
# 10 threads, 100 connections, for 30 seconds against the root path.
wrk -t10 -c100 -d30s http://localhost:3000/
# Same load shape, driving POST requests from a Lua script.
wrk -t10 -c100 -d30s -s post.lua http://localhost:3000/api/data
const autocannon = require("autocannon");
// Benchmark: 100 connections, HTTP pipelining depth 10, for 30 seconds.
const instance = autocannon(
  {
    url: "http://localhost:3000",
    connections: 100,
    pipelining: 10,
    duration: 30,
  },
  (err, result) => {
    // Fix: the original dereferenced `result` without checking `err`,
    // crashing with a TypeError whenever the benchmark failed.
    if (err) {
      console.error("Benchmark failed:", err);
      return;
    }
    console.log("结果:");
    console.log(`  请求总数: ${result.requests.total}`);
    console.log(`  平均延迟: ${result.latency.average}ms`);
    console.log(`  最大延迟: ${result.latency.max}ms`);
    console.log(`  P99延迟: ${result.latency.p99}ms`);
    console.log(`  吞吐量: ${result.throughput.average} bytes/sec`);
    console.log(`  请求/秒: ${result.requests.average}`);
  }
);
const server = http.createServer(app);
let isShuttingDown = false;

/**
 * Stops accepting new connections, drains, closes backing stores, then
 * exits. A 30-second watchdog forces exit if connections fail to drain.
 * @param {string} signal - Name of the signal that triggered shutdown.
 */
function gracefulShutdown(signal) {
  // Fix: ignore repeated signals (e.g. SIGINT followed by SIGTERM) —
  // the original called server.close() again and double-closed the
  // database/redis connections.
  if (isShuttingDown) return;
  console.log(`Received ${signal}, starting graceful shutdown...`);
  isShuttingDown = true;
  server.close(() => {
    console.log("HTTP server closed");
    Promise.all([database.close(), redis.close()])
      .then(() => {
        console.log("All connections closed");
        process.exit(0);
      })
      .catch((err) => {
        console.error("Error during shutdown:", err);
        process.exit(1);
      });
  });
  // Watchdog: force-exit if draining takes longer than 30 seconds.
  setTimeout(() => {
    console.error("Could not close connections in time, forcing shutdown");
    process.exit(1);
  }, 30000);
}
process.on("SIGTERM", () => gracefulShutdown("SIGTERM"));
process.on("SIGINT", () => gracefulShutdown("SIGINT"));
// Readiness probe: report 503 once shutdown has begun so load
// balancers stop routing new traffic to this instance.
app.get("/health", (req, res) => {
  if (!isShuttingDown) {
    res.json({ status: "healthy" });
  } else {
    res.status(503).json({ status: "shutting_down" });
  }
});
import asyncio
import signal
from fastapi import FastAPI
app = FastAPI()
# Set once shutdown begins; request handlers can poll it to refuse work.
shutdown_event = asyncio.Event()
async def graceful_shutdown():
    """Flag shutdown, wait out a grace period, then close backing stores."""
    print("Starting graceful shutdown...")
    shutdown_event.set()
    # Grace period for in-flight requests to complete.
    await asyncio.sleep(5)
    await database.close()
    await redis.close()
    print("Graceful shutdown complete")
def handle_signal(sig):
    print(f"Received signal {sig}")
    # NOTE(review): asyncio.create_task() requires a *running* loop —
    # this only works if the signal fires while the loop below is
    # running; confirm how the loop is started in the real app.
    asyncio.create_task(graceful_shutdown())
# NOTE(review): get_event_loop() is deprecated for loop creation in
# modern Python; verify against the target interpreter version.
loop = asyncio.get_event_loop()
for sig in (signal.SIGTERM, signal.SIGINT):
    loop.add_signal_handler(sig, handle_signal, sig)
- 先测量,再优化:不要盲目优化,用数据驱动决策
- 避免过早优化:先保证正确性,再追求性能
- 关注瓶颈:优化最慢的环节(木桶效应)
- 持续监控:性能问题往往是渐进式的
- 压测验证:优化后用压测验证效果