MySQL 性能调优
MySQL 性能调优是优化数据库系统以获得最佳性能的过程。本章将详细介绍 MySQL 性能调优的各个方面,包括配置优化、查询优化、索引优化和架构优化。
MySQL 配置优化
全局系统变量调优
sql
-- Inspect current values before changing anything
SHOW VARIABLES LIKE 'innodb_buffer_pool_size';
SHOW VARIABLES LIKE 'query_cache_size';
SHOW VARIABLES LIKE 'max_connections';

-- Important tuning parameters.
-- NOTE: SET GLOBAL does not survive a restart, and for session-scoped
-- variables it only affects NEW connections. Persist settings in my.cnf
-- (or use SET PERSIST on MySQL 8.0+).

-- 1. InnoDB buffer pool size (the single most important parameter)
-- Recommended: 60-80% of physical RAM on a dedicated DB server.
-- Resizable online since MySQL 5.7.5.
SET GLOBAL innodb_buffer_pool_size = 4294967296; -- 4GB

-- 2. Maximum number of client connections
SET GLOBAL max_connections = 500;

-- 3. Query cache (removed entirely in MySQL 8.0)
SET GLOBAL query_cache_size = 268435456; -- 256MB (MySQL 5.7 and below only)

-- 4. InnoDB redo log size
-- innodb_log_file_size is READ-ONLY at runtime: SET GLOBAL fails with
-- ERROR 1238. Set it in my.cnf and restart. On MySQL 8.0.30+ use the
-- dynamic variable innodb_redo_log_capacity instead:
-- SET GLOBAL innodb_redo_log_capacity = 536870912; -- 512MB

-- 5. InnoDB log buffer (dynamic on MySQL 8.0; my.cnf-only on 5.7)
SET GLOBAL innodb_log_buffer_size = 16777216; -- 16MB

-- 6. Per-session sort buffer (allocated per sort -- keep it small)
SET GLOBAL sort_buffer_size = 2097152; -- 2MB

-- 7. Per-session sequential read buffer
SET GLOBAL read_buffer_size = 1048576; -- 1MB

-- 8. Per-session random read buffer
SET GLOBAL read_rnd_buffer_size = 2097152; -- 2MB

-- 9. Thread cache for reusing connection handler threads
SET GLOBAL thread_cache_size = 50;

-- 10. In-memory temporary table limits
-- (the effective cap is the SMALLER of the two, so keep them equal)
SET GLOBAL tmp_table_size = 67108864; -- 64MB
SET GLOBAL max_heap_table_size = 67108864; -- 64MB
my.cnf 配置示例
ini
[mysqld]
# Basic settings
port = 3306
socket = /var/run/mysqld/mysqld.sock
# InnoDB settings
# Buffer pool: ~60-80% of RAM on a dedicated database server
innodb_buffer_pool_size = 4G
# Common rule of thumb: one instance per GB of buffer pool
innodb_buffer_pool_instances = 4
innodb_log_file_size = 512M
innodb_log_buffer_size = 16M
# 1 = flush redo log at every commit (full durability, slowest);
# 0 or 2 trade crash durability for throughput
innodb_flush_log_at_trx_commit = 1
innodb_file_per_table = 1
# O_DIRECT avoids double-buffering through the OS page cache
innodb_flush_method = O_DIRECT
# Connection settings
max_connections = 500
max_connect_errors = 100000
thread_cache_size = 50
# Query cache (MySQL 5.7 and below; removed in MySQL 8.0 --
# these three lines will prevent 8.0 from starting)
query_cache_type = 1
query_cache_size = 256M
query_cache_limit = 2M
# In-memory temporary table limits (effective cap is the smaller one)
tmp_table_size = 64M
max_heap_table_size = 64M
# Per-session sort / read / join buffers (allocated per operation)
sort_buffer_size = 2M
read_buffer_size = 1M
read_rnd_buffer_size = 2M
join_buffer_size = 2M
binlog_cache_size = 64K
# Logging
log_error = /var/log/mysql/error.log
slow_query_log = 1
slow_query_log_file = /var/log/mysql/slow.log
long_query_time = 2
# NOTE: can flood the slow log on busy servers
log_queries_not_using_indexes = 1
# Security: disable LOAD DATA LOCAL INFILE
local-infile = 0
查询性能优化
慢查询分析
sql
-- Enable the slow query log at runtime
SET GLOBAL slow_query_log = 'ON';
SET GLOBAL long_query_time = 1; -- log queries running longer than 1 second

-- Inspect slow queries recorded in the mysql.slow_log table.
-- NOTE: this table is only populated when log_output includes 'TABLE'
-- (the default is FILE): SET GLOBAL log_output = 'TABLE';
SELECT
    query_time,
    lock_time,
    rows_sent,
    rows_examined,
    sql_text
FROM mysql.slow_log
ORDER BY query_time DESC
LIMIT 10;

-- Aggregate statement statistics from PERFORMANCE_SCHEMA
-- (normalized query digests; timer columns are in PICOSECONDS,
-- so divide by 1e12 to get seconds)
SELECT
    DIGEST_TEXT,
    COUNT_STAR,
    AVG_TIMER_WAIT/1000000000000 AS avg_time_s,
    SUM_ROWS_EXAMINED,
    SUM_ROWS_SENT
FROM performance_schema.events_statements_summary_by_digest
WHERE DIGEST_TEXT LIKE '%your_table%'
ORDER BY AVG_TIMER_WAIT DESC
LIMIT 10;
查询优化技术
sql
-- 1. Use EXPLAIN to inspect the query execution plan
EXPLAIN SELECT u.id, u.username, p.email
FROM users u
JOIN profiles p ON u.id = p.user_id
WHERE u.status = 'active'
ORDER BY u.created_at DESC
LIMIT 20;

-- 2. Optimize JOIN queries: make sure JOIN columns are indexed
-- Before: implicit comma join (easy to drop a condition by accident)
SELECT * FROM large_table1 l1, large_table2 l2
WHERE l1.id = l2.large_table1_id AND l1.status = 'active';

-- After: explicit INNER JOIN with supporting indexes.
-- Prerequisite DDL (run once, separately -- not part of the query):
--   CREATE INDEX idx_lt1_status ON large_table1(status);
--   CREATE INDEX idx_lt2_lt1_fk ON large_table2(large_table1_id);
SELECT l1.*, l2.*
FROM large_table1 l1
INNER JOIN large_table2 l2 ON l1.id = l2.large_table1_id
WHERE l1.status = 'active';

-- 3. Avoid SELECT *
-- Before
SELECT * FROM users WHERE id = 1;
-- After: fetch only the columns you need
SELECT id, username, email FROM users WHERE id = 1;

-- 4. Use LIMIT on potentially large result sets
-- Before: may return the entire category
SELECT * FROM products WHERE category = 'electronics';
-- After: bounded result set
SELECT id, name, price FROM products
WHERE category = 'electronics'
ORDER BY created_at DESC
LIMIT 100;

-- 5. Rewrite subqueries as joins
-- Before: IN (subquery)
SELECT * FROM users
WHERE id IN (SELECT user_id FROM orders WHERE total > 1000);
-- After: JOIN (DISTINCT needed because a user may have many big orders)
SELECT DISTINCT u.*
FROM users u
INNER JOIN orders o ON u.id = o.user_id
WHERE o.total > 1000;
索引优化
sql
-- 1. Create appropriate indexes
-- Single-column indexes
CREATE INDEX idx_users_status ON users(status);
CREATE INDEX idx_users_created_at ON users(created_at);
-- Composite indexes (leftmost-prefix rule: column order matters)
CREATE INDEX idx_users_status_created ON users(status, created_at);
CREATE INDEX idx_orders_user_status ON orders(user_id, status);
-- 2. Verify index usage with EXPLAIN
EXPLAIN FORMAT=JSON
SELECT * FROM users
WHERE status = 'active' AND created_at > '2023-01-01';
-- 3. Covering-index optimization
-- Include every selected column so the query is served from the index
-- alone, avoiding row lookups (trade-off: wide indexes slow down writes)
CREATE INDEX idx_users_covering ON users(status, created_at, username, email);
-- This query can be answered entirely from the index above
SELECT username, email FROM users
WHERE status = 'active'
ORDER BY created_at DESC LIMIT 20;
-- 4. Index maintenance
-- Find indexes never used since server start
-- (these counters reset on restart -- only judge after sufficient uptime)
SELECT
object_schema,
object_name,
index_name
FROM performance_schema.table_io_waits_summary_by_index_usage
WHERE index_name IS NOT NULL
AND count_star = 0
AND object_schema NOT IN ('mysql', 'information_schema', 'performance_schema', 'sys');
-- Drop unused indexes (verify against workload first)
-- DROP INDEX index_name ON table_name;
-- 5. Refresh index statistics for the optimizer
ANALYZE TABLE users;
表结构优化
数据类型优化
sql
-- 1. Choose appropriately sized data types
-- Before: oversized types
CREATE TABLE products (
id BIGINT AUTO_INCREMENT, -- BIGINT rarely needed for this volume
name VARCHAR(255), -- probably longer than necessary
price DECIMAL(20,2), -- more precision than needed
status TINYTEXT -- small closed value set: ENUM fits better
);
-- After
CREATE TABLE products (
id INT AUTO_INCREMENT, -- sized to the realistic row count
name VARCHAR(100), -- sized to the actual data
price DECIMAL(10,2), -- adequate precision for money
status ENUM('active', 'inactive', 'deleted') DEFAULT 'active'
);
-- 2. Smaller types save space
-- INT vs TINYINT vs SMALLINT
-- A flag with a few values fits TINYINT (1 byte) instead of INT (4 bytes)
CREATE TABLE user_status (
user_id INT,
is_active TINYINT(1), -- 0 or 1; MySQL's BOOLEAN is an alias for this
account_type TINYINT -- fine while account types stay few
);
-- 3. UNSIGNED doubles the positive range for non-negative values
CREATE TABLE user_scores (
user_id INT UNSIGNED, -- non-negative, extended positive range
score SMALLINT UNSIGNED, -- 0..65535 instead of -32768..32767
level TINYINT UNSIGNED -- 0..255 instead of -128..127
);
表分区
sql
-- 1. RANGE partitioning (well suited to time-series data)
CREATE TABLE orders (
    id INT AUTO_INCREMENT,
    order_date DATE NOT NULL,
    user_id INT,
    amount DECIMAL(10,2),
    -- the partitioning column must be part of every unique key
    PRIMARY KEY (id, order_date)
)
PARTITION BY RANGE (YEAR(order_date)) (
    PARTITION p2022 VALUES LESS THAN (2023),
    PARTITION p2023 VALUES LESS THAN (2024),
    PARTITION p2024 VALUES LESS THAN (2025),
    PARTITION p_future VALUES LESS THAN MAXVALUE
);

-- 2. LIST partitioning
CREATE TABLE users_by_region (
    id INT AUTO_INCREMENT,
    region ENUM('north', 'south', 'east', 'west'),
    username VARCHAR(50),
    PRIMARY KEY (id, region)
)
PARTITION BY LIST COLUMNS (region) (
    PARTITION p_north VALUES IN ('north'),
    PARTITION p_south VALUES IN ('south'),
    PARTITION p_east VALUES IN ('east'),
    PARTITION p_west VALUES IN ('west')
);

-- 3. Hash-style partitioning on a string column.
-- NOTE: PARTITION BY HASH only accepts integer expressions built from a
-- small whitelist of functions -- CRC32() is NOT allowed and would be
-- rejected. KEY partitioning hashes arbitrary column types internally.
CREATE TABLE sessions (
    id BIGINT AUTO_INCREMENT,
    session_key VARCHAR(128) NOT NULL,
    user_id INT,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    PRIMARY KEY (id, session_key)
)
PARTITION BY KEY (session_key) PARTITIONS 8;
读写分离和分库分表
主从复制配置
sql
-- 1. Master configuration (these lines belong in my.cnf; shown for reference)
[mysqld]
server-id = 1
log-bin = mysql-bin
binlog-format = ROW
binlog-do-db = your_database_name
expire_logs_days = 7
max_binlog_size = 100M
-- 2. Replica configuration (my.cnf)
[mysqld]
server-id = 2
relay-log = mysql-relay-bin
read_only = 1
-- 3. Set up replication
-- On the master: create a dedicated replication account
CREATE USER 'repl_user'@'%' IDENTIFIED BY 'repl_password';
GRANT REPLICATION SLAVE ON *.* TO 'repl_user'@'%';
FLUSH PRIVILEGES;
-- Record the current binary log file name and position
SHOW MASTER STATUS;
-- On the replica: point it at the master using the coordinates above.
-- NOTE: MySQL 8.0.22+/8.0.23+ prefer START REPLICA and
-- CHANGE REPLICATION SOURCE TO; the forms below remain as aliases.
CHANGE MASTER TO
MASTER_HOST='master_host_ip',
MASTER_USER='repl_user',
MASTER_PASSWORD='repl_password',
MASTER_LOG_FILE='mysql-bin.000001',
MASTER_LOG_POS=154;
START SLAVE;
SHOW SLAVE STATUS\G
应用层面的读写分离
javascript
// Read/write splitting for a Node.js application.
// Writes always go to the master pool; reads are spread across replicas.
class DatabaseRouter {
    constructor() {
        // Master pool - handles all write traffic
        this.master = mysql.createPool({
            host: process.env.MASTER_DB_HOST,
            user: process.env.MASTER_DB_USER,
            password: process.env.MASTER_DB_PASSWORD,
            database: process.env.DB_NAME,
            connectionLimit: 10
        });
        // Replica pools - handle read traffic
        const replicaHosts = [process.env.SLAVE_DB_HOST_1, process.env.SLAVE_DB_HOST_2];
        this.slaves = replicaHosts.map((replicaHost) => mysql.createPool({
            host: replicaHost,
            user: process.env.SLAVE_DB_USER,
            password: process.env.SLAVE_DB_PASSWORD,
            database: process.env.DB_NAME,
            connectionLimit: 5
        }));
    }
    // Pick one replica pool uniformly at random
    getSlaveConnection() {
        const pick = Math.floor(Math.random() * this.slaves.length);
        return this.slaves[pick];
    }
    // Run a write statement on the master
    async executeWrite(query, params) {
        const [rows] = await this.master.execute(query, params);
        return rows;
    }
    // Run a read statement on a randomly chosen replica
    async executeRead(query, params) {
        const pool = this.getSlaveConnection();
        const [rows] = await pool.execute(query, params);
        return rows;
    }
    // Smart routing: classify the statement, then dispatch
    async execute(sql, params) {
        const kind = this.getSqlType(sql);
        return this.isWriteQuery(kind)
            ? this.executeWrite(sql, params)
            : this.executeRead(sql, params);
    }
    // Classify a statement by its leading keyword
    getSqlType(sql) {
        const head = sql.trim().toUpperCase();
        const keywords = ['SELECT', 'INSERT', 'UPDATE', 'DELETE', 'REPLACE'];
        for (const keyword of keywords) {
            if (head.startsWith(keyword)) {
                return keyword;
            }
        }
        return 'OTHER';
    }
    // Statement types that must be routed to the master
    isWriteQuery(sqlType) {
        return ['INSERT', 'UPDATE', 'DELETE', 'REPLACE'].includes(sqlType);
    }
}
监控和性能分析
性能监控查询
sql
-- 1. Currently running (non-idle) sessions
SELECT
    ID,
    USER,
    HOST,
    DB,
    COMMAND,
    TIME,
    STATE,
    INFO
FROM information_schema.PROCESSLIST
WHERE COMMAND != 'Sleep'
ORDER BY TIME DESC;

-- 2. Lock waits: which transaction is blocking which (MySQL 5.7).
-- NOTE: INNODB_LOCKS / INNODB_LOCK_WAITS were removed in MySQL 8.0;
-- there use sys.innodb_lock_waits or performance_schema.data_lock_waits.
SELECT
    r.trx_id AS waiting_trx_id,
    r.trx_mysql_thread_id AS waiting_thread,
    r.trx_query AS waiting_query,
    b.trx_id AS blocking_trx_id,
    b.trx_mysql_thread_id AS blocking_thread,
    b.trx_query AS blocking_query
FROM information_schema.INNODB_LOCK_WAITS w
INNER JOIN information_schema.INNODB_TRX b
    ON b.trx_id = w.blocking_trx_id
INNER JOIN information_schema.INNODB_TRX r
    ON r.trx_id = w.requesting_trx_id;

-- 3. Table size statistics (TABLE_ROWS is an estimate for InnoDB)
SELECT
    TABLE_NAME,
    TABLE_ROWS,
    DATA_LENGTH,
    INDEX_LENGTH,
    DATA_FREE,
    CREATE_TIME,
    UPDATE_TIME
FROM information_schema.TABLES
WHERE TABLE_SCHEMA = 'your_database_name';

-- 4. Index selectivity (cardinality relative to row count --
-- indexes with very low selectivity are rarely useful)
SELECT
    s.TABLE_NAME,
    s.INDEX_NAME,
    s.COLUMN_NAME,
    s.CARDINALITY,
    t.TABLE_ROWS,
    ROUND(s.CARDINALITY/t.TABLE_ROWS*100, 2) AS selectivity
FROM information_schema.STATISTICS s
INNER JOIN information_schema.TABLES t
    ON s.TABLE_NAME = t.TABLE_NAME
    AND s.TABLE_SCHEMA = t.TABLE_SCHEMA
WHERE s.TABLE_SCHEMA = 'your_database_name'
AND t.TABLE_ROWS > 0
ORDER BY selectivity DESC;
使用 PERFORMANCE_SCHEMA 监控
sql
-- 1. Global wait-event statistics.
-- Timer columns are in PICOSECONDS: /1e12 -> seconds, /1e9 -> milliseconds.
SELECT
    EVENT_NAME,
    COUNT_STAR,
    SUM_TIMER_WAIT/1000000000000 AS total_time_s,
    AVG_TIMER_WAIT/1000000000 AS avg_time_ms
FROM performance_schema.events_waits_summary_global_by_event_name
WHERE COUNT_STAR > 0
ORDER BY SUM_TIMER_WAIT DESC
LIMIT 10;

-- 2. File I/O volume by event
SELECT
    EVENT_NAME,
    COUNT_READ,
    COUNT_WRITE,
    SUM_NUMBER_OF_BYTES_READ,
    SUM_NUMBER_OF_BYTES_WRITE
FROM performance_schema.file_summary_by_event_name
WHERE COUNT_READ + COUNT_WRITE > 0
ORDER BY COUNT_READ + COUNT_WRITE DESC;

-- 3. Table lock wait counters
SELECT
    OBJECT_SCHEMA,
    OBJECT_NAME,
    COUNT_READ_NORMAL,
    COUNT_READ_WITH_SHARED_LOCKS,
    COUNT_WRITE_ALLOW_WRITE,
    COUNT_WRITE_ONLY
FROM performance_schema.table_lock_waits_summary_by_table
WHERE OBJECT_SCHEMA = 'your_database_name'
ORDER BY COUNT_READ_NORMAL + COUNT_WRITE_ALLOW_WRITE DESC;
性能调优工具
MySQLTuner
bash
#!/bin/bash
# MySQLTuner - MySQL configuration audit script.
# Download over HTTPS so the script we are about to execute
# cannot be tampered with in transit.
wget https://mysqltuner.pl/ -O mysqltuner.pl
wget https://raw.githubusercontent.com/major/MySQLTuner-perl/master/basic_passwords.txt -O basic_passwords.txt
wget https://raw.githubusercontent.com/major/MySQLTuner-perl/master/vulnerabilities.csv -O vulnerabilities.csv
# NOTE: passing --pass on the command line exposes the password in
# `ps` output and shell history; prefer a ~/.my.cnf credentials file.
perl mysqltuner.pl --host localhost --user root --pass your_password
Percona Toolkit
bash
# Install Percona Toolkit
# Advanced MySQL performance-analysis utilities
# 1. Measure replication lag via heartbeat rows
pt-heartbeat --daemonize --database percona --table heartbeat --create-table --update
# 2. Digest and rank queries from the slow log
pt-query-digest /var/log/mysql/slow.log
# 3. Online (non-blocking) schema change
# WARNING: validate with --dry-run before using --execute on production
pt-online-schema-change --alter "ADD COLUMN new_column INT" D=database_name,t=table_name --execute
实际优化案例
电商系统优化案例
sql
-- Problem: slow order-listing query
-- Original query
SELECT o.*, u.username, p.name as product_name
FROM orders o
JOIN users u ON o.user_id = u.id
JOIN order_items oi ON o.id = oi.order_id
JOIN products p ON oi.product_id = p.id
WHERE o.created_at BETWEEN '2023-01-01' AND '2023-12-31'
ORDER BY o.created_at DESC
LIMIT 50;

-- Optimization steps:
-- 1. Add composite indexes
-- (descending index keys require MySQL 8.0; 5.7 parses DESC but ignores it)
CREATE INDEX idx_orders_created_user ON orders(created_at DESC, user_id);
CREATE INDEX idx_order_items_order_product ON order_items(order_id, product_id);

-- 2. Rewrite: aggregate product names instead of fanning out rows.
-- (GROUP_CONCAT output is truncated at group_concat_max_len, default 1024)
SELECT o.id, o.total_amount, o.status, o.created_at,
    u.username,
    GROUP_CONCAT(p.name SEPARATOR ', ') as product_names
FROM orders o
JOIN users u ON o.user_id = u.id
JOIN order_items oi ON o.id = oi.order_id
JOIN products p ON oi.product_id = p.id
WHERE o.created_at BETWEEN '2023-01-01' AND '2023-12-31'
GROUP BY o.id, u.username
ORDER BY o.created_at DESC
LIMIT 50;

-- 3. Keyset (cursor) pagination instead of OFFSET.
-- The two-column cursor must be an OR-form row comparison: a plain
-- "created_at < X AND id < Y" would wrongly skip older rows whose id
-- happens to be >= Y.
SELECT o.id, o.total_amount, o.status, o.created_at
FROM orders o
WHERE o.created_at < '2023-06-01'                    -- strictly before the boundary
   OR (o.created_at = '2023-06-01' AND o.id < 12345) -- ties broken by id
ORDER BY o.created_at DESC, o.id DESC
LIMIT 50;
大数据量表优化
sql
-- Optimization strategies for very large tables

-- 1. Archive historical data
-- Create the archive table with the same structure
CREATE TABLE orders_archive LIKE orders;
ALTER TABLE orders_archive REMOVE PARTITIONING; -- if the source table is partitioned

-- Compute the cutoff ONCE so the INSERT and the DELETE operate on
-- exactly the same row set (two separate NOW() calls could diverge
-- and leave rows deleted-but-not-archived or vice versa)
SET @archive_cutoff = DATE_SUB(NOW(), INTERVAL 1 YEAR);

-- Copy historical rows into the archive
INSERT INTO orders_archive
SELECT * FROM orders
WHERE created_at < @archive_cutoff;

-- Remove the archived rows.
-- On huge tables, batch this (DELETE ... LIMIT in a loop) to keep
-- transactions short and replication lag low.
DELETE FROM orders
WHERE created_at < @archive_cutoff;

-- 2. Partition maintenance
-- Add a new partition for the upcoming period
ALTER TABLE orders
ADD PARTITION (PARTITION p2025 VALUES LESS THAN (2026));
-- Drop an obsolete partition (near-instant compared with DELETE)
ALTER TABLE orders
DROP PARTITION p2022;

-- 3. Online schema changes (pt-online-schema-change)
-- allow altering the table structure without blocking live traffic
性能监控仪表板
关键性能指标
sql
-- Performance-metrics view.
-- NOTE: information_schema.GLOBAL_STATUS was removed in MySQL 5.7.6+;
-- the status counters now live in performance_schema.global_status.
CREATE VIEW performance_monitoring AS
SELECT
    'Buffer Pool Hit Ratio' as metric_name,
    ROUND((1 - (SUM(IF(variable_name = 'Innodb_buffer_pool_reads', variable_value, 0)) /
        NULLIF(SUM(IF(variable_name = 'Innodb_buffer_pool_read_requests', variable_value, 0)), 0))) * 100, 2) as value,
    'Percentage' as unit
FROM performance_schema.global_status
WHERE variable_name IN ('Innodb_buffer_pool_reads', 'Innodb_buffer_pool_read_requests')
UNION ALL
SELECT
    'Key Buffer Hit Ratio' as metric_name,
    ROUND((1 - (SUM(IF(variable_name = 'Key_reads', variable_value, 0)) /
        NULLIF(SUM(IF(variable_name = 'Key_read_requests', variable_value, 0)), 0))) * 100, 2) as value,
    'Percentage' as unit
FROM performance_schema.global_status
WHERE variable_name IN ('Key_reads', 'Key_read_requests')
UNION ALL
-- Query-cache counters only exist on MySQL 5.7 and below
SELECT
    'Query Cache Hit Ratio' as metric_name,
    ROUND((SUM(IF(variable_name = 'Qcache_hits', variable_value, 0)) /
        NULLIF(SUM(IF(variable_name = 'Qcache_inserts', variable_value, 0)) +
        SUM(IF(variable_name = 'Qcache_hits', variable_value, 0)), 0)) * 100, 2) as value,
    'Percentage' as unit
FROM performance_schema.global_status
WHERE variable_name IN ('Qcache_hits', 'Qcache_inserts');
总结
MySQL 性能调优是一个持续的过程,需要:
- 配置优化 - 根据硬件和应用需求调整 MySQL 配置
- 查询优化 - 使用 EXPLAIN 分析和优化慢查询
- 索引优化 - 创建合适的索引并定期维护
- 架构优化 - 使用分区、主从复制等技术
- 监控分析 - 持续监控性能指标并分析瓶颈
通过系统性的性能调优,可以显著提升 MySQL 数据库的性能和稳定性。