MySQL pitfall notes: Error 1390, "Prepared statement contains too many placeholders"
DB error message: Error 1390 (HY000): Prepared statement contains too many placeholders
Likely scenarios: bulk inserts, bulk updates, or queries whose conditions carry too many placeholders.
Root cause: the SQL statement contains more placeholders than MySQL allows. MySQL caps the number of placeholders in a single prepared statement at 65,535.
The MySQL 5.7 source (sql/sql_prepare.cc) makes the limit explicit: the maximum number of placeholders for values in a prepared statement is the maximum value of a 16-bit unsigned integer, specifically 65,535.
static bool init_param_array(Prepared_statement *stmt)
{
  LEX *lex= stmt->lex;
  if ((stmt->param_count= lex->param_list.elements))
  {
    if (stmt->param_count > (uint) UINT_MAX16)
    {
      /* Error code to be defined in 5.0 */
      my_message(ER_PS_MANY_PARAM, ER(ER_PS_MANY_PARAM), MYF(0));
      return TRUE;
    }
    /* ... */
For bulk inserts and updates: when writing m columns across n rows, you must keep the total placeholder count within the limit, i.e. m*n < 65,536 (equivalently, m*n <= 65,535).
Example: the SQL below inserts m=4 columns, and 65,536/4 = 16,384, so len(data) must stay below 16,384 or the call fails with Error 1390 ...
db.Select("material_id", "material_key", "status", "created_by").Create(data)
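To make that arithmetic reusable, here is a minimal sketch; the constant and the helper name are mine, not part of GORM or the code in this post:

// maxPlaceholders is MySQL's prepared-statement placeholder limit (UINT_MAX16).
const maxPlaceholders = 65535

// safeBatchRows returns the largest row count n such that cols*n stays
// within the placeholder limit. For cols=4 it returns 16383, i.e.
// len(data) must be less than 16384, matching the example above.
func safeBatchRows(cols int) int {
    if cols <= 0 {
        return 0
    }
    return maxPlaceholders / cols
}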
For queries: the total number of ? placeholders across the query conditions must stay within 65,535.
Example: with the SQL below, len(materialIds) = 65534 and len(langs) = 1 (65,535 placeholders in total) queries fine, but len(materialIds) = 65534 and len(langs) = 2 (65,536 in total) fails with Error 1390 ...
db.Model(MaterialLang{}).Where("material_id in ? and lang in ? and is_del = 0", materialIds, langs).Find(&list)
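Since every element of both slices binds one ?, a cheap client-side pre-check can fail fast instead of round-tripping to MySQL. A minimal sketch (the function name is mine):

// tooManyPlaceholders reports whether the combined IN lists would exceed
// MySQL's 65,535-placeholder limit for a single prepared statement.
func tooManyPlaceholders(materialIds []int64, langs []string) bool {
    return len(materialIds)+len(langs) > 65535
}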
Solution: process in batches.
Trade-off to consider: batching increases the total elapsed time.
Wrap the logic in a reusable batch-processing function (BatchProcess below) that:
1. Uses goroutines to run the batches concurrently, shortening the overall elapsed time (for serial execution, simply set the concurrency to 1)
2. Uses a chan as a semaphore to cap the concurrency and protect MySQL from overload
Go code
The batch-processing function:
// BatchProcess splits list into chunks of at most MaxDBOnce elements and runs
// operation on each chunk, with at most MaxDBGoroutine chunks in flight at once.
func BatchProcess[T any](ctx context.Context, db *gorm.DB, list []T, operation func(*gorm.DB, []T) error) error {
    logger := xlog.FromContext(ctx)
    length := len(list)
    if length == 0 {
        return nil
    }
    limitCfg := GetConfig()
    maxOnce := limitCfg.MaxDBOnce
    if maxOnce <= 0 {
        maxOnce = 5000 // default chunk size
    }
    // Small enough for a single call: skip the batching machinery entirely.
    if length <= maxOnce {
        return operation(db, list)
    }
    count := length / maxOnce
    if length%maxOnce > 0 {
        count++
    }
    type Res struct {
        Err   error
        Start int
        End   int
    }
    // Buffered so every goroutine can report its result without blocking.
    result := make(chan Res, count)
    var wg sync.WaitGroup
    maxCount := limitCfg.MaxDBGoroutine
    if maxCount <= 0 {
        maxCount = 5 // default max concurrent goroutines
    }
    // Semaphore channel: holds one token per in-flight goroutine.
    limit := make(chan struct{}, maxCount)
    for i := 0; i < count; i++ {
        limit <- struct{}{} // blocks once maxCount goroutines are running
        wg.Add(1)
        go func(i int) {
            var err error
            startTm := time.Now()
            start := i * maxOnce
            end := (i + 1) * maxOnce
            if end > length {
                end = length
            }
            chunk := list[start:end]
            defer func() {
                <-limit // release the semaphore slot
                wg.Done()
                logger.Info("BatchProcess", xlog.String("cost", time.Since(startTm).String()), xlog.Int("start", start), xlog.Int("end", end), xlog.Int("total", length), xlog.Err(err))
            }()
            err = operation(db, chunk)
            result <- Res{
                Err:   err,
                Start: start,
                End:   end,
            }
        }(i)
    }
    wg.Wait()
    close(result)
    // Surface the first chunk error; all chunks have finished by this point.
    for v := range result {
        if v.Err != nil {
            logger.Error("BatchProcess err", xlog.Err(v.Err), xlog.Int("start", v.Start), xlog.Int("end", v.End))
            return v.Err
        }
    }
    logger.Info("BatchProcess end", xlog.Int("total", length))
    return nil
}
Example usage for bulk inserts:
func AddMany(ctx context.Context, db *gorm.DB, list []*MaterialVersion) error {
    if len(list) == 0 {
        return nil
    }
    err := BatchProcess(ctx, db, list, func(db *gorm.DB, data []*MaterialVersion) error {
        return db.Select("material_id", "material_key", "status", "created_by").
            Create(data).Error
    })
    return err
}
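For the plain insert case, GORM v2 also ships a built-in CreateInBatches that splits the slice into fixed-size chunks for you, serially and without the concurrency control of BatchProcess above. A minimal sketch using the same columns; the batch size of 5000 is an assumption, chosen so 4 columns x 5000 rows stays well below 65,535 placeholders:

// CreateInBatches issues one INSERT per chunk of 5000 rows (assumed size),
// keeping each statement far under the placeholder limit.
err := db.Select("material_id", "material_key", "status", "created_by").
    CreateInBatches(list, 5000).Error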
Example usage for queries:
Note: if the per-batch callback modifies variables outside its own scope, remember to guard them with a lock!
func List(ctx context.Context, db *gorm.DB, materialIds []int64, langs []string) ([]*model.Material, error) {
    var mu sync.Mutex
    retList := make([]*model.Material, 0)
    err := BatchProcess(ctx, db, materialIds, func(db *gorm.DB, ids []int64) error {
        res := make([]*model.Material, 0)
        err := db.Model(MaterialLang{}).
            Where("material_id in ? and lang in ? and is_del = 0", ids, langs).
            Find(&res).Error
        if err != nil {
            return err
        }
        // retList is shared across the batch goroutines, so lock around the append.
        mu.Lock()
        retList = append(retList, res...)
        mu.Unlock()
        return nil
    })
    return retList, err
}
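One caveat in this query example: only materialIds is batched, while every element of langs still binds a ? in each chunk. If langs can grow large, the ID batch size needs to shrink accordingly. A hedged sketch of the sizing rule (the helper is mine; it follows directly from the 65,535 limit above):

// idBatchSize returns how many material IDs fit in one query after the
// langs IN list has claimed its share of the 65,535 placeholders.
func idBatchSize(langCount int) int {
    n := 65535 - langCount
    if n < 1 {
        return 0 // langs alone exceed the limit and must be batched as well
    }
    return n
}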