Appearance
数据库性能优化
数据库性能优化是提升应用程序整体性能的关键环节。本章将详细介绍数据库性能优化的各种技术、策略和最佳实践。
数据库架构优化
读写分离
sql
-- 1. 主从复制配置 (MySQL)
-- 主库配置 (my.cnf)
[mysqld]
server-id = 1
log-bin = mysql-bin
binlog-format = ROW
binlog-do-db = your_database
expire_logs_days = 7
sync_binlog = 1
innodb_flush_log_at_trx_commit = 1
-- 从库配置 (my.cnf)
[mysqld]
server-id = 2
relay-log = mysql-relay-bin
read_only = 1
replicate-do-db = your_database
-- 2. 设置主从复制
-- 在主库上
CREATE USER 'repl_user'@'%' IDENTIFIED BY 'repl_password';
GRANT REPLICATION SLAVE ON *.* TO 'repl_user'@'%';
FLUSH PRIVILEGES;
-- 获取主库状态
SHOW MASTER STATUS;
-- 在从库上
CHANGE MASTER TO
MASTER_HOST='master_ip',
MASTER_USER='repl_user',
MASTER_PASSWORD='repl_password',
MASTER_LOG_FILE='mysql-bin.000001',
MASTER_LOG_POS=154;
START SLAVE;
SHOW SLAVE STATUS\G
javascript
// 3. Application-level read/write splitting implementation.
class ReadWriteSeparation {
  /**
   * Splits traffic between a MySQL primary and a set of read replicas.
   * `mysql` is expected to be in scope (e.g. the mysql2 module).
   * @param {object} masterConfig - pool config for the primary (writes).
   * @param {object[]} slaveConfigs - pool configs for the read replicas.
   */
  constructor(masterConfig, slaveConfigs) {
    this.master = mysql.createPool(masterConfig);
    this.slaves = slaveConfigs.map((config) => mysql.createPool(config));
    this.currentSlaveIndex = 0; // round-robin cursor over this.slaves
  }

  // Run a statement on the primary; returns the result rows.
  async executeWrite(query, params) {
    console.log('Executing write query on master:', query);
    const [results] = await this.master.execute(query, params);
    return results;
  }

  // Run a statement on the next replica (simple round-robin balancing).
  async executeRead(query, params) {
    const slave = this.slaves[this.currentSlaveIndex];
    this.currentSlaveIndex = (this.currentSlaveIndex + 1) % this.slaves.length;
    console.log('Executing read query on slave:', query);
    const [results] = await slave.execute(query, params);
    return results;
  }

  /**
   * Smart routing. Only plain SELECTs go to a replica; everything else —
   * writes, DDL, transaction control and locking reads — goes to the primary.
   * (Bug fix: the original routed unrecognized statements such as CREATE or
   * BEGIN to replicas, which fail when replicas run with read_only = 1, and
   * sent SELECT ... FOR UPDATE to replicas where the row lock is useless.)
   */
  async execute(sql, params) {
    const sqlType = this.getSqlType(sql);
    if (sqlType === 'SELECT' && !this.isLockingRead(sql)) {
      return await this.executeRead(sql, params);
    }
    return await this.executeWrite(sql, params);
  }

  // Classify a statement by its leading keyword.
  getSqlType(sql) {
    const trimmedSql = sql.trim().toUpperCase();
    if (trimmedSql.startsWith('SELECT')) return 'SELECT';
    if (trimmedSql.startsWith('INSERT')) return 'INSERT';
    if (trimmedSql.startsWith('UPDATE')) return 'UPDATE';
    if (trimmedSql.startsWith('DELETE')) return 'DELETE';
    if (trimmedSql.startsWith('REPLACE')) return 'REPLACE';
    return 'OTHER';
  }

  // Locking reads must run on the primary so their row locks are effective.
  isLockingRead(sql) {
    const upper = sql.toUpperCase();
    return (
      upper.includes('FOR UPDATE') ||
      upper.includes('FOR SHARE') ||
      upper.includes('LOCK IN SHARE MODE')
    );
  }

  isWriteQuery(sqlType) {
    return ['INSERT', 'UPDATE', 'DELETE', 'REPLACE'].includes(sqlType);
  }
}
分库分表
sql
-- 1. 水平分表 (按时间)
CREATE TABLE orders_202301 (
id INT AUTO_INCREMENT,
user_id INT,
order_date DATE,
amount DECIMAL(10,2),
PRIMARY KEY (id, order_date)
) PARTITION BY HASH(MONTH(order_date)) PARTITIONS 4;
CREATE TABLE orders_202302 (
id INT AUTO_INCREMENT,
user_id INT,
order_date DATE,
amount DECIMAL(10,2),
PRIMARY KEY (id, order_date)
) PARTITION BY HASH(MONTH(order_date)) PARTITIONS 4;
-- 2. 垂直分表
-- 原表
CREATE TABLE users (
id INT AUTO_INCREMENT PRIMARY KEY,
username VARCHAR(50),
email VARCHAR(100),
password_hash VARCHAR(255),
profile TEXT, -- 详细信息
settings JSON -- 用户设置
);
-- 优化后:拆分为基本信息表和详细信息表
CREATE TABLE user_basic (
id INT AUTO_INCREMENT PRIMARY KEY,
username VARCHAR(50) UNIQUE,
email VARCHAR(100) UNIQUE,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
CREATE TABLE user_profile (
user_id INT PRIMARY KEY,
profile TEXT,
settings JSON,
FOREIGN KEY (user_id) REFERENCES user_basic(id)
);
-- 3. 分库策略
-- 按用户ID哈希分库
CREATE DATABASE shard_0;
CREATE DATABASE shard_1;
CREATE DATABASE shard_2;
CREATE DATABASE shard_3;
-- 每个分库中创建相同的表结构
USE shard_0;
CREATE TABLE user_shard (
id INT AUTO_INCREMENT PRIMARY KEY,
user_id INT,
username VARCHAR(50),
email VARCHAR(100),
INDEX idx_user_id (user_id)
);
javascript
// 4. Sharding (split-database / split-table) routing implementation.
class DatabaseSharding {
  /**
   * Hash/range sharding router. `mysql` is expected to be in scope
   * (e.g. the mysql2 module).
   * @param {object[]} shards - one pool config per shard; array index = shard id.
   */
  constructor(shards) {
    this.shards = shards;
    this.pools = {};
    // One connection pool per shard, keyed by shard id.
    shards.forEach((config, index) => {
      this.pools[index] = mysql.createPool(config);
    });
  }

  /**
   * Hash sharding on user id. Uses a floored modulo so negative ids still
   * map into [0, shardCount) instead of yielding a negative (missing) shard.
   */
  getShardId(userId, shardCount = 4) {
    return ((userId % shardCount) + shardCount) % shardCount;
  }

  /**
   * Range sharding: `ranges` is an ascending list of { max, shardId };
   * the first bucket whose max covers the value wins, shard 0 is the default.
   */
  getRangeShardId(value, ranges) {
    for (const range of ranges) {
      if (value <= range.max) {
        return range.shardId;
      }
    }
    return 0; // default shard
  }

  // Route a single-shard statement by user id; returns the result rows.
  async executeByUserId(userId, query, params) {
    const shardId = this.getShardId(userId);
    const pool = this.pools[shardId];
    const [results] = await pool.execute(query, params);
    return results;
  }

  /**
   * Fan a statement out to every shard and merge the row sets.
   * A failing shard logs the error and contributes an empty result instead
   * of rejecting the whole fan-out.
   * (Bug fix: mysql2's execute() resolves to [rows, fields]; the original
   * `results.flat().flat()` merged the field-metadata arrays into the row
   * set. Only the row arrays are concatenated now.)
   */
  async executeAcrossShards(query, params) {
    const promises = Object.values(this.pools).map((pool) =>
      pool.execute(query, params).catch((err) => {
        console.error('Shard query error:', err);
        return [[]]; // empty result set so other shards are not interrupted
      })
    );
    const settled = await Promise.all(promises);
    return settled.flatMap(([rows]) => rows);
  }
}
索引优化
索引设计原则
sql
-- 1. 索引选择性分析
-- 选择性 = 不重复值的数量 / 总行数
SELECT
COUNT(DISTINCT status) / COUNT(*) AS selectivity,
COUNT(*) AS total_rows
FROM users;
-- 2. 复合索引设计
-- 最左前缀原则:查询条件必须包含索引的最左边列
CREATE INDEX idx_users_status_created ON users(status, created_at DESC, username);
-- 有效的查询
SELECT * FROM users WHERE status = 'active';
SELECT * FROM users WHERE status = 'active' AND created_at > '2023-01-01';
SELECT * FROM users WHERE status = 'active' AND created_at > '2023-01-01' AND username LIKE 'john%';
-- 无效的查询(无法使用索引)
SELECT * FROM users WHERE created_at > '2023-01-01'; -- 缺少最左列
SELECT * FROM users WHERE username LIKE 'john%'; -- 缺少最左列
-- 3. 覆盖索引
-- 创建包含所有需要字段的索引,避免回表查询
CREATE INDEX idx_orders_covering ON orders(user_id, status, total_amount, created_at);
-- 这样的查询可以直接从索引获取数据
SELECT user_id, status, total_amount, created_at
FROM orders
WHERE user_id = 123 AND status = 'completed';
-- 4. 前缀索引(针对长字符串字段)
-- 对于很长的字符串字段,创建前缀索引
CREATE INDEX idx_email_prefix ON users(email(10)); -- 使用前10个字符
-- 5. 函数索引(MySQL 8.0+)
CREATE INDEX idx_lower_email ON users((LOWER(email))); -- 虚拟列索引
-- 6. 部分索引(注意:MySQL 并不支持部分索引,以下为 PostgreSQL 语法示例)
CREATE INDEX idx_active_users ON users(username, email)
WHERE status = 'active'; -- 仅对活跃用户创建索引(PostgreSQL)
索引维护和监控
sql
-- 1. 查找未使用的索引
SELECT
s.object_schema,
s.object_name,
s.index_name,
io.count_read,
io.count_write
FROM performance_schema.table_io_waits_summary_by_index_usage io
JOIN information_schema.statistics s
ON s.table_name = io.object_name
AND s.index_name = io.index_name
WHERE io.count_read = 0
AND io.object_schema NOT IN ('mysql', 'information_schema', 'performance_schema', 'sys')
AND s.index_name IS NOT NULL
ORDER BY io.count_write DESC;
-- 2. 查找重复索引
SELECT
table_name,
index_name,
GROUP_CONCAT(column_name ORDER BY seq_in_index) as columns
FROM information_schema.statistics
WHERE table_schema = 'your_database'
GROUP BY table_name, index_name
HAVING COUNT(*) > 1;
-- 3. 索引统计信息更新
ANALYZE TABLE users;
ANALYZE TABLE orders;
-- 4. 查看索引使用情况
SHOW INDEX FROM users;
SHOW TABLE STATUS LIKE 'users';
-- 5. 索引碎片整理
OPTIMIZE TABLE users;
OPTIMIZE TABLE orders;
查询优化
EXPLAIN 分析
sql
-- 1. 基本 EXPLAIN 使用
EXPLAIN SELECT u.*, p.email
FROM users u
JOIN profiles p ON u.id = p.user_id
WHERE u.status = 'active'
ORDER BY u.created_at DESC
LIMIT 20;
-- 2. EXPLAIN FORMAT=JSON 详细分析
EXPLAIN FORMAT=JSON
SELECT u.*, p.email
FROM users u
JOIN profiles p ON u.id = p.user_id
WHERE u.status = 'active'
ORDER BY u.created_at DESC
LIMIT 20;
-- 3. 使用 performance_schema 分析查询
SELECT
DIGEST_TEXT,
COUNT_STAR,
AVG_TIMER_WAIT/1000000000 AS avg_time_s,
SUM_ROWS_EXAMINED,
SUM_ROWS_SENT
FROM performance_schema.events_statements_summary_by_digest
WHERE DIGEST_TEXT LIKE '%users%'
ORDER BY AVG_TIMER_WAIT DESC
LIMIT 10;
查询优化技术
sql
-- 1. JOIN 优化
-- 优化前:使用隐式 JOIN
SELECT * FROM large_table1 l1, large_table2 l2
WHERE l1.id = l2.large_table1_id AND l1.status = 'active';
-- 优化后:使用显式 JOIN 和适当的索引
SELECT l1.*, l2.*
FROM large_table1 l1
INNER JOIN large_table2 l2 ON l1.id = l2.large_table1_id
WHERE l1.status = 'active';
-- 前提:已为 large_table1.status 建立索引(原文的 INDEX(l1.status) 并非有效的 SQL 语法)
-- 2. 子查询优化
-- 优化前:使用子查询
SELECT * FROM users
WHERE id IN (SELECT user_id FROM orders WHERE total > 1000);
-- 优化后:使用 JOIN
SELECT DISTINCT u.*
FROM users u
INNER JOIN orders o ON u.id = o.user_id
WHERE o.total > 1000;
-- 3. EXISTS 优化
-- 优化前:使用 IN
SELECT * FROM users
WHERE id IN (SELECT user_id FROM user_preferences WHERE theme = 'dark');
-- 优化后:使用 EXISTS
SELECT * FROM users u
WHERE EXISTS (
SELECT 1 FROM user_preferences up
WHERE up.user_id = u.id AND up.theme = 'dark'
);
-- 4. GROUP BY 优化
-- 优化前:复杂的 GROUP BY
SELECT
u.department,
COUNT(*),
AVG(u.salary),
MAX(o.total_amount)
FROM users u
LEFT JOIN orders o ON u.id = o.user_id
GROUP BY u.department, u.region, u.level;
-- 优化后:减少分组字段
SELECT
department,
COUNT(*),
AVG(avg_salary),
MAX(max_order)
FROM (
SELECT
u.department,
u.salary as avg_salary,
COALESCE(o.total_amount, 0) as max_order
FROM users u
LEFT JOIN orders o ON u.id = o.user_id
) t
GROUP BY department;
-- 5. 分页优化
-- 优化前:使用 OFFSET(大数据集时很慢)
SELECT * FROM products
WHERE category = 'electronics'
ORDER BY created_at DESC
LIMIT 20 OFFSET 10000;
-- 优化后:使用游标分页
SELECT * FROM products
WHERE category = 'electronics' AND id < 12345 -- 上一页的最大ID
ORDER BY id DESC
LIMIT 20;
数据库配置优化
MySQL 配置优化
ini
# my.cnf - MySQL 性能优化配置
[mysqld]
# InnoDB 相关配置
innodb_buffer_pool_size = 4G # 重要:设为物理内存的 60-80%
innodb_buffer_pool_instances = 8 # 每个实例不超过 1G
innodb_log_file_size = 512M # 事务日志大小
innodb_log_buffer_size = 16M # 日志缓冲区
innodb_flush_log_at_trx_commit = 2 # 性能与安全平衡
innodb_file_per_table = 1 # 每个表独立文件
innodb_flush_method = O_DIRECT # 直接IO
innodb_read_io_threads = 8 # 读取线程数
innodb_write_io_threads = 8 # 写入线程数
innodb_io_capacity = 2000 # IO容量
innodb_io_capacity_max = 4000 # IO容量最大值
# 连接相关配置
max_connections = 1000 # 最大连接数
max_connect_errors = 100000 # 最大连接错误数
thread_cache_size = 100 # 线程缓存
table_open_cache = 4000 # 表缓存
table_definition_cache = 4000 # 表定义缓存
# 查询缓存 (MySQL 5.7 及以下)
query_cache_type = 1
query_cache_size = 256M
query_cache_limit = 2M
# 临时表配置
tmp_table_size = 128M
max_heap_table_size = 128M
# 排序和连接配置
sort_buffer_size = 2M
read_buffer_size = 1M
read_rnd_buffer_size = 2M
join_buffer_size = 2M
bulk_insert_buffer_size = 64M
# 日志配置
log_error = /var/log/mysql/error.log
slow_query_log = 1
slow_query_log_file = /var/log/mysql/slow.log
long_query_time = 1
log_queries_not_using_indexes = 1
log_slow_admin_statements = 1
log_slow_slave_statements = 1
# 安全配置
local-infile = 0
PostgreSQL 配置优化
ini
# postgresql.conf - PostgreSQL 性能优化配置
# 内存相关
shared_buffers = 2GB # 共享缓冲区,通常设为物理内存的 25%
effective_cache_size = 6GB # OS和PostgreSQL共用的磁盘缓存估计
work_mem = 16MB # 每个内部排序操作和哈希表使用的内存量
maintenance_work_mem = 512MB # 维护操作的内存
# WAL 相关
wal_buffers = 16MB # WAL 缓冲区
checkpoint_completion_target = 0.9 # 检查点完成目标
wal_writer_delay = 200ms # WAL写入延迟
max_wal_size = 4GB # 最大WAL大小
min_wal_size = 1GB # 最小WAL大小
# 并发相关
max_worker_processes = 8 # 最大工作进程数
max_parallel_workers_per_gather = 4 # 每个查询的最大并行工作者数
max_parallel_workers = 8 # 系统范围内的最大并行工作者数
# 连接相关
max_connections = 200 # 最大连接数
superuser_reserved_connections = 10 # 为超级用户保留的连接数
# 其他
random_page_cost = 1.1 # 随机页面读取的成本估计
effective_io_concurrency = 200 # 同时进行的IO操作数
数据库监控
性能监控查询
sql
-- 1. 当前运行的查询
SELECT
ID,
USER,
HOST,
DB,
COMMAND,
TIME,
STATE,
LEFT(INFO, 100) AS info
FROM information_schema.PROCESSLIST
WHERE COMMAND != 'Sleep'
ORDER BY TIME DESC;
-- 2. 检查锁等待情况
SELECT
r.trx_id waiting_trx_id,
r.trx_mysql_thread_id waiting_thread,
r.trx_query waiting_query,
b.trx_id blocking_trx_id,
b.trx_mysql_thread_id blocking_thread,
b.trx_query blocking_query
FROM information_schema.INNODB_LOCK_WAITS w
INNER JOIN information_schema.INNODB_TRX r
ON r.trx_id = w.requesting_trx_id
INNER JOIN information_schema.INNODB_TRX b
ON b.trx_id = w.blocking_trx_id;
-- 3. 检查慢查询
SELECT
start_time,
user_host,
query_time,
lock_time,
rows_sent,
rows_examined,
db,
sql_text
FROM mysql.slow_log
ORDER BY query_time DESC
LIMIT 10;
-- 4. 表统计信息
SELECT
TABLE_NAME,
TABLE_ROWS,
DATA_LENGTH,
INDEX_LENGTH,
DATA_FREE,
CREATE_TIME,
UPDATE_TIME
FROM information_schema.TABLES
WHERE TABLE_SCHEMA = 'your_database_name'
ORDER BY DATA_LENGTH DESC;
-- 5. 索引使用统计
SELECT
OBJECT_SCHEMA,
OBJECT_NAME,
COUNT_READ,
COUNT_WRITE,
SUM_TIMER_WAIT/1000000000 AS total_time_s
FROM performance_schema.table_io_waits_summary_by_table
WHERE OBJECT_SCHEMA = 'your_database_name'
ORDER BY SUM_TIMER_WAIT DESC;
自定义监控脚本
javascript
// Database performance monitoring class.
class DatabasePerformanceMonitor {
  /**
   * Collects MySQL health metrics through an existing connection/pool that
   * exposes an async execute(sql) -> [rows, fields] API (e.g. mysql2).
   */
  constructor(connection) {
    this.connection = connection;
    this.metrics = {}; // last snapshot taken by collectMetrics()
  }

  // Take a full snapshot of all metric groups and cache it on the instance.
  async collectMetrics() {
    const metrics = {
      connections: await this.getConnectionMetrics(),
      queries: await this.getQueryMetrics(),
      locks: await this.getLockMetrics(),
      cache: await this.getCacheMetrics(),
      tables: await this.getTableMetrics()
    };
    this.metrics = metrics;
    return metrics;
  }

  // Connection pressure: total / idle connections, longest-running command.
  async getConnectionMetrics() {
    const [rows] = await this.connection.execute(`
      SELECT
        COUNT(*) as total_connections,
        SUM(IF(COMMAND = 'Sleep', 1, 0)) as sleeping_connections,
        MAX(TIME) as max_execution_time
      FROM information_schema.PROCESSLIST
    `);
    return rows[0];
  }

  // Slow-query stats over the last hour (requires slow_query_log enabled with
  // log_output = TABLE so that mysql.slow_log is populated).
  async getQueryMetrics() {
    const [rows] = await this.connection.execute(`
      SELECT
        COUNT(*) as total_queries,
        AVG(query_time) as avg_query_time,
        MAX(query_time) as max_query_time,
        SUM(rows_examined) as total_rows_examined
      FROM mysql.slow_log
      WHERE start_time > DATE_SUB(NOW(), INTERVAL 1 HOUR)
    `);
    return rows[0];
  }

  // Current lock waits.
  // Bug fix: performance_schema.data_lock_waits has no wait_time column, so
  // the original AVG(wait_time) made the query fail. Only the wait count is
  // reported now; avg_wait_time stays in the row (as NULL) to keep the shape.
  async getLockMetrics() {
    const [rows] = await this.connection.execute(`
      SELECT
        COUNT(*) as lock_waits,
        NULL as avg_wait_time
      FROM performance_schema.data_lock_waits
    `);
    return rows[0];
  }

  // InnoDB buffer pool size, in pages.
  // Bug fix: information_schema.GLOBAL_STATUS was removed in MySQL 5.7.6;
  // the status table lives in performance_schema.
  async getCacheMetrics() {
    const [rows] = await this.connection.execute(`
      SELECT
        VARIABLE_VALUE as buffer_pool_size
      FROM performance_schema.global_status
      WHERE VARIABLE_NAME = 'Innodb_buffer_pool_pages_total'
    `);
    return rows[0];
  }

  // Aggregate row / data / index sizes for the current schema.
  async getTableMetrics() {
    const [rows] = await this.connection.execute(`
      SELECT
        SUM(TABLE_ROWS) as total_rows,
        SUM(DATA_LENGTH) as total_data_size,
        SUM(INDEX_LENGTH) as total_index_size
      FROM information_schema.TABLES
      WHERE TABLE_SCHEMA = DATABASE()
    `);
    return rows[0];
  }

  // Evaluate a fresh snapshot against fixed thresholds; returns issue strings.
  // NOTE(review): mysql.slow_log.query_time is a TIME value and may come back
  // from the driver as a string; the `> 1` comparison assumes it converts to
  // seconds — confirm with the actual driver configuration.
  async detectPerformanceIssues() {
    const metrics = await this.collectMetrics();
    const issues = [];
    // Too many open connections.
    if (metrics.connections.total_connections > 800) {
      issues.push(`High connection count: ${metrics.connections.total_connections}`);
    }
    // Too many idle (sleeping) connections.
    if (metrics.connections.sleeping_connections > 500) {
      issues.push(`High sleeping connection count: ${metrics.connections.sleeping_connections}`);
    }
    // Average slow-query time too high.
    if (metrics.queries.avg_query_time > 1) {
      issues.push(`High average query time: ${metrics.queries.avg_query_time}s`);
    }
    return issues;
  }
}
数据库优化工具
MySQLTuner
bash
#!/bin/bash
# MySQLTuner 性能分析脚本
# 下载和使用
wget http://mysqltuner.pl/ -O mysqltuner.pl
chmod +x mysqltuner.pl
# 运行分析
perl mysqltuner.pl --host localhost --user root --pass your_password
# 重点关注的指标:
# - Buffer pool hit rate (应 > 95%)
# - Key buffer hit rate (应 > 95%)
# - Query cache hit rate (应 > 70%)
# - Connections usage (应 < 80%)
Percona Toolkit
bash
# Percona Toolkit 性能分析工具集
# 1. 分析慢查询日志
pt-query-digest /var/log/mysql/slow.log
# 2. 检查表结构和索引
pt-online-schema-change --alter "ADD COLUMN new_field INT" D=database_name,t=table_name --dry-run
# 3. 监控复制延迟
pt-heartbeat --monitor --database percona --table heartbeat --host replica_host
# 4. 查找重复索引
pt-duplicate-key-checker --host=localhost --user=root --password=your_password
实际优化案例
电商系统数据库优化案例
sql
-- 电商系统数据库优化实例
-- 1. 订单表优化
-- 优化前:单一的大表
CREATE TABLE orders_old (
id BIGINT AUTO_INCREMENT PRIMARY KEY,
user_id INT,
order_number VARCHAR(50),
status ENUM('pending', 'confirmed', 'shipped', 'delivered', 'cancelled'),
total_amount DECIMAL(10,2),
shipping_address TEXT,
billing_address TEXT,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
INDEX idx_user_id (user_id),
INDEX idx_status (status),
INDEX idx_created_at (created_at)
);
-- 优化后:分表 + 优化索引
-- 按时间分区
CREATE TABLE orders_optimized (
id BIGINT AUTO_INCREMENT,
user_id INT UNSIGNED,
order_number VARCHAR(50) UNIQUE,
status TINYINT UNSIGNED,
total_amount DECIMAL(10,2),
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (id, created_at),
INDEX idx_user_status (user_id, status),
INDEX idx_status_created (status, created_at),
INDEX idx_user_created (user_id, created_at)
)
PARTITION BY RANGE (YEAR(created_at)) (
PARTITION p2022 VALUES LESS THAN (2023),
PARTITION p2023 VALUES LESS THAN (2024),
PARTITION p2024 VALUES LESS THAN (2025),
PARTITION p_future VALUES LESS THAN MAXVALUE
);
-- 状态枚举转换为数字
-- pending: 1, confirmed: 2, shipped: 3, delivered: 4, cancelled: 5
javascript
// 2. E-commerce query-optimization implementation.
class EcommerceDatabaseOptimizer {
// connection: a mysql2-style connection/pool exposing execute(sql, params).
constructor(connection) {
this.connection = connection;
}
// Product search with optional keyword / category / price filters.
// NOTE(review): LIMIT/OFFSET are bound as placeholders — mysql2 prepared
// statements need numeric values here; confirm `page`/`limit` arrive as
// numbers, not strings.
async searchProducts(searchParams) {
const {
keywords = '',
categoryId,
minPrice,
maxPrice,
sortBy = 'relevance',
page = 1,
limit = 20
} = searchParams;
// The full-text relevance score is always selected, so `keywords` is always
// the first bound parameter (even when it is the empty string).
let query = `
SELECT
p.id,
p.name,
p.price,
p.category_id,
p.rating,
p.review_count,
i.thumbnail_url,
MATCH(p.name, p.description) AGAINST(? IN NATURAL LANGUAGE MODE) as relevance_score
FROM products p
LEFT JOIN product_images i ON p.id = i.product_id AND i.is_primary = 1
WHERE 1=1
`;
const params = [keywords];
if (keywords) {
query += ` AND MATCH(p.name, p.description) AGAINST(? IN NATURAL LANGUAGE MODE)`;
params.push(keywords);
}
if (categoryId) {
query += ' AND p.category_id = ?';
params.push(categoryId);
}
if (minPrice) {
query += ' AND p.price >= ?';
params.push(minPrice);
}
if (maxPrice) {
query += ' AND p.price <= ?';
params.push(maxPrice);
}
// NOTE(review): despite the original comment claiming cursor pagination,
// this is plain OFFSET pagination; deep pages will be slow — consider
// keyset (cursor) pagination for large result sets.
const offset = (page - 1) * limit;
query += ` ORDER BY ${this.getSortExpression(sortBy)} LIMIT ? OFFSET ?`;
params.push(limit, offset);
const [results] = await this.connection.execute(query, params);
return results;
}
// Whitelisted ORDER BY expressions — the template interpolation above is
// injection-safe only because every branch returns a fixed string.
getSortExpression(sortBy) {
switch(sortBy) {
case 'price_asc': return 'p.price ASC';
case 'price_desc': return 'p.price DESC';
case 'rating': return 'p.rating DESC, p.review_count DESC';
case 'newest': return 'p.created_at DESC';
case 'relevance': return 'relevance_score DESC';
default: return 'p.id DESC';
}
}
// Paged order listing for one user, with per-order item aggregates.
// NOTE(review): GROUP BY o.id with non-aggregated selected columns relies on
// MySQL functional-dependency detection; verify ONLY_FULL_GROUP_BY accepts
// it, since orders_optimized has a composite primary key (id, created_at).
async getUserOrders(userId, statusFilters = [], page = 1, limit = 10) {
let query = `
SELECT
o.id,
o.order_number,
o.total_amount,
o.status,
o.created_at,
COUNT(oi.id) as item_count,
SUM(oi.quantity * oi.unit_price) as calculated_total
FROM orders_optimized o
LEFT JOIN order_items oi ON o.id = oi.order_id
`;
const params = [userId];
// Parameterized query to prevent SQL injection.
query += ' WHERE o.user_id = ?';
if (statusFilters && statusFilters.length > 0) {
query += ` AND o.status IN (${statusFilters.map(() => '?').join(',')})`;
params.push(...statusFilters);
}
query += `
GROUP BY o.id
ORDER BY o.created_at DESC
LIMIT ? OFFSET ?
`;
params.push(limit, (page - 1) * limit);
const [orders] = await this.connection.execute(query, params);
return orders;
}
// Batch status update; returns the number of affected rows (0 for no ids).
async batchUpdateOrderStatus(orderIds, newStatus) {
if (!orderIds || orderIds.length === 0) return 0;
const placeholders = orderIds.map(() => '?').join(',');
const query = `
UPDATE orders_optimized
SET status = ?, updated_at = NOW()
WHERE id IN (${placeholders})
`;
const params = [newStatus, ...orderIds];
const [result] = await this.connection.execute(query, params);
return result.affectedRows;
}
// Move delivered orders older than the cutoff into orders_archive, then
// delete them from the live table; returns the number of rows deleted.
// NOTE(review): orders_optimized.status is declared TINYINT in the schema
// above, yet this filters on the string 'delivered' — these rows would never
// match; confirm the intended status encoding.
// NOTE(review): the INSERT and DELETE are not wrapped in a transaction; a
// failure between them leaves rows duplicated in the archive — consider
// executing both inside one transaction.
async archiveOldOrders(archiveBeforeDate) {
// Copy old orders into the archive table.
const archiveQuery = `
INSERT INTO orders_archive
SELECT * FROM orders_optimized
WHERE created_at < ? AND status = 'delivered'
`;
await this.connection.execute(archiveQuery, [archiveBeforeDate]);
// Remove the rows that were just archived.
const deleteQuery = `
DELETE FROM orders_optimized
WHERE created_at < ? AND status = 'delivered'
`;
const [result] = await this.connection.execute(deleteQuery, [archiveBeforeDate]);
return result.affectedRows;
}
}
数据库性能优化检查清单
javascript
// Database performance optimization checklist.
// Item strings are user-facing content and are intentionally kept in Chinese.
const databaseOptimizationChecklist = {
// Architecture optimizations
architecture: [
'实施读写分离',
'设计合理的分库分表策略',
'使用连接池管理连接',
'配置主从复制',
'实现数据归档策略',
'使用缓存层'
],
// Index optimizations
indexing: [
'为经常查询的字段创建索引',
'创建复合索引优化多字段查询',
'使用覆盖索引避免回表',
'定期分析索引使用情况',
'删除未使用的索引',
'为排序和分组字段创建索引'
],
// Query optimizations
queries: [
'使用 EXPLAIN 分析查询执行计划',
'避免 SELECT * 查询',
'优化 JOIN 查询',
'使用参数化查询防止注入',
'实现分页优化',
'避免在 WHERE 子句中使用函数'
],
// Configuration optimizations
configuration: [
'调整缓冲区大小',
'优化连接池配置',
'设置合适的超时值',
'配置慢查询日志',
'启用查询缓存',
'调整日志级别'
],
// Monitoring practices
monitoring: [
'监控查询性能',
'跟踪慢查询',
'监控连接使用情况',
'检查锁等待情况',
'分析表统计信息',
'定期性能基准测试'
],
// Maintenance practices
maintenance: [
'定期更新统计信息',
'优化表结构',
'清理过期数据',
'重建索引',
'备份策略优化',
'安全补丁更新'
]
};
// Database performance scorecard.
class DatabasePerformanceScorecard {
  constructor() {
    this.score = 0; // last computed overall score (filled in by callers)
    this.maxScore = 100; // scores are percentages
  }

  /**
   * Percentage of completed checklist items, rounded to the nearest integer.
   * Bug fix: guard against totalItems === 0, which previously produced NaN.
   * @param {number} completedItems - number of items done.
   * @param {number} totalItems - total number of items in the category.
   * @returns {number} score in [0, 100].
   */
  calculateScore(completedItems, totalItems) {
    if (!totalItems) {
      return 0;
    }
    return Math.round((completedItems / totalItems) * 100);
  }

  /**
   * Build an empty per-category report skeleton from the module-level
   * databaseOptimizationChecklist; `completed` must be filled by the caller
   * from real audit data before scores can be computed.
   */
  generateReport() {
    const report = {};
    for (const [category, items] of Object.entries(databaseOptimizationChecklist)) {
      report[category] = {
        total: items.length,
        completed: 0, // to be filled from real audit data
        score: 0
      };
    }
    return report;
  }
}
总结
数据库性能优化是一个持续的过程,需要从多个维度考虑:
- 架构优化 - 读写分离、分库分表、缓存策略
- 索引优化 - 合理设计和维护索引
- 查询优化 - 优化 SQL 语句和执行计划
- 配置优化 - 调整数据库参数
- 监控分析 - 持续监控和优化性能
通过系统性的数据库优化,可以显著提升应用程序的性能和可扩展性。