@@ -3,19 +3,20 @@ package services
 import (
     "encoding/csv"
     "fmt"
+    "io"
+    "io/ioutil"
+    "log"
     "os"
     "path/filepath"
-    "runtime"
-    "sort"
+    "strings"
     "sync"
     "time"

-    "regexp"
     "xg_fetl/internal/db_executor"
     "xg_fetl/internal/models"
 )

-// ReaderMain 主函数,参数化配置,使其更加灵活
+// ReaderMain 函数读取 CSV 文件并将数据插入数据库
 func ReaderMain(
     dbName string, // 数据库名称
     dirPath string, // CSV 文件所在的根目录
@@ -24,262 +25,258 @@ func ReaderMain(
     pageSize int, // 分页大小,每次读取的记录数
     mysqeExec db_executor.DBExecutor, // 数据库执行器接口
 ) {
-    // 创建用于接收每个表处理结果的通道
-    resultChan := make(chan TableResult)
-    // 创建等待组,用于等待所有表的处理完成
+    // 创建日志文件
+    logFile, err := os.OpenFile("process.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
+    if err != nil {
+        fmt.Printf("无法创建日志文件:%v\n", err)
+        return
+    }
+    defer logFile.Close()
+
+    // 设置日志输出到文件和控制台
+    multiWriter := io.MultiWriter(os.Stdout, logFile)
+    logger := log.New(multiWriter, "", log.LstdFlags)
+
+    // 创建一个等待组,用于等待所有表的处理完成
     var wg sync.WaitGroup

-    // 遍历每个表进行处理,使用协程
-    for _, tableName := range sortedTableNames(tableInfos) {
-        wg.Add(1)
+    // 创建一个通道,用于收集每个表的处理结果
+    resultsChan := make(chan TableResult)

-        go func(tableName string) {
+    // 遍历每个表,启动协程进行处理
+    for tableName, tableInfo := range tableInfos {
+        wg.Add(1)
+        go func(tableName string, tableInfo models.TableInfo) {
             defer wg.Done()

-            // 开始处理表,记录开始时间
-            startTime := time.Now()
-
             // 初始化 TableResult
-            tableResult := TableResult{
+            result := TableResult{
                 TableName:      tableName,
-                Success:        false,
-                GoroutineCount: 1, // 只使用一个协程
+                Success:        true,
+                GeneratedFiles: 0,
+                ErrorCount:     0,
+                GoroutineCount: 0,
+                TotalRows:      0,
+                AverageRowSize: 0,
+                ExportDuration: 0,
+                TableSize:      0,
                 Logs:           []string{},
             }

-            // 构建表名对应的文件夹路径,例如 dirPath/tableName
-            tableDirPath := filepath.Join(dirPath, tableName)
-
-            // 检查表名对应的文件夹是否存在
-            if _, err := os.Stat(tableDirPath); os.IsNotExist(err) {
-                errMsg := fmt.Sprintf("表 %s 对应的目录不存在: %v", tableName, err)
-                tableResult.Error = fmt.Errorf(errMsg)
-                tableResult.Logs = append(tableResult.Logs, errMsg)
-                resultChan <- tableResult
-                return
-            }
-
-            // 编译匹配 CSV 文件名的正则表达式,格式为 表名_[序号].csv
-            filenamePattern := fmt.Sprintf(`^%s_\d+\.csv$`, regexp.QuoteMeta(tableName))
-            re, err := regexp.Compile(filenamePattern)
-            if err != nil {
-                errMsg := fmt.Sprintf("无法编译文件名正则表达式: %v", err)
-                tableResult.Error = fmt.Errorf(errMsg)
-                tableResult.Logs = append(tableResult.Logs, errMsg)
-                resultChan <- tableResult
-                return
-            }
-
-            // 获取表目录下的所有 CSV 文件路径
-            files, err := GetCSVFiles(tableDirPath)
-            if err != nil {
-                errMsg := fmt.Sprintf("无法获取 CSV 文件: %v", err)
-                tableResult.Error = fmt.Errorf(errMsg)
-                tableResult.Logs = append(tableResult.Logs, errMsg)
-                resultChan <- tableResult
-                return
-            }
-
-            // 过滤出符合命名规则的文件列表,并按文件名排序
-            var matchedFiles []string
-            for _, filePath := range files {
-                filename := filepath.Base(filePath)
-                if re.MatchString(filename) {
-                    matchedFiles = append(matchedFiles, filePath)
-                }
-            }
-
-            if len(matchedFiles) == 0 {
-                errMsg := fmt.Sprintf("表 %s 没有找到符合条件的 CSV 文件", tableName)
-                tableResult.Error = fmt.Errorf(errMsg)
-                tableResult.Logs = append(tableResult.Logs, errMsg)
-                resultChan <- tableResult
-                return
-            }
-
-            // 按文件名排序,确保顺序一致
-            sort.Strings(matchedFiles)
+            // 记录“开始处理表”日志
+            logMessage(&result, fmt.Sprintf("开始处理表:%s\n", tableName), logger)

-            // 估算可用内存,决定一次加载多少个文件
-            availableMemory := getAvailableMemory()
-            var batchFiles []string
-            var totalSize int64 = 0
-            fmt.Println("可用内存:", availableMemory)
-            for idx, filePath := range matchedFiles {
-                fileInfo, err := os.Stat(filePath)
-                if err != nil {
-                    tableResult.ErrorCount++
-                    logMsg := fmt.Sprintf("无法获取文件信息 %s: %v", filePath, err)
-                    tableResult.Logs = append(tableResult.Logs, logMsg)
-                    continue
-                }
-                fileSize := fileInfo.Size()
-
-                // 判断是否超过可用内存
-                if totalSize+fileSize > availableMemory && len(batchFiles) > 0 {
-                    // 处理当前批次文件
-                    fmt.Println("处理当前批次文件", batchFiles)
-                    err = processCSVFiles(batchFiles, delimiter, mysqeExec, tableName, &tableResult)
-                    if err != nil {
-                        tableResult.ErrorCount++
-                        logMsg := fmt.Sprintf("处理文件批次时出错: %v", err)
-                        tableResult.Logs = append(tableResult.Logs, logMsg)
-                    }
-                    // 重置批次
-                    batchFiles = []string{}
-                    fmt.Println("重置批次")
-                    totalSize = 0
-                }
-
-                batchFiles = append(batchFiles, filePath)
-                totalSize += fileSize
-
-                // 如果是最后一个文件,处理剩余的批次
-                if idx == len(matchedFiles)-1 && len(batchFiles) > 0 {
-                    fmt.Println(",处理剩余的批次 ", batchFiles)
-                    err = processCSVFiles(batchFiles, delimiter, mysqeExec, tableName, &tableResult)
-                    if err != nil {
-                        tableResult.ErrorCount++
-                        logMsg := fmt.Sprintf("处理文件批次时出错: %v", err)
-                        tableResult.Logs = append(tableResult.Logs, logMsg)
-                    }
-                }
-            }
+            // 处理单个表
+            result = processTable(dbName, dirPath, tableName, tableInfo, delimiter, pageSize, mysqeExec, &result, logger)

-            // 计算导出持续时间
-            tableResult.ExportDuration = time.Since(startTime)
-            tableResult.Success = tableResult.ErrorCount == 0
+            // 将结果发送到通道
+            resultsChan <- result

-            resultChan <- tableResult
-        }(tableName)
+            // 记录“完成处理表”日志
+            logMessage(&result, fmt.Sprintf("完成处理表:%s\n", tableName), logger)
+        }(tableName, tableInfo)
     }

-    // 开启一个协程来收集结果并打印
+    // 启动一个协程,等待所有表处理完毕后关闭结果通道
     go func() {
         wg.Wait()
-        close(resultChan)
+        close(resultsChan)
     }()

-    // 打印结果
-    for result := range resultChan {
-        if result.Success {
-            fmt.Printf("表 %s 导入成功,耗时 %v,共插入 %d 行数据。\n", result.TableName, result.ExportDuration, result.TotalRows)
-        } else {
-            fmt.Printf("表 %s 导入完成,存在错误,错误数量 %d,耗时 %v。\n", result.TableName, result.ErrorCount, result.ExportDuration)
-        }
-        // 打印详细日志
-        for _, logMsg := range result.Logs {
-            fmt.Println(logMsg)
-        }
+    // 收集所有处理结果到 resultsMap
+    resultsMap := make(map[string]TableResult)
+
+    // 从结果通道接收每个表的处理结果
+    for result := range resultsChan {
+        // 处理结果,例如输出日志或进行其他处理
+        logMessage(&result, fmt.Sprintf("表 %s 处理完成,成功:%v,错误数量:%d,总行数:%d\n",
+            result.TableName, result.Success, result.ErrorCount, result.TotalRows), logger)
+
+        // 将结果存储到 resultsMap
+        resultsMap[result.TableName] = result
+    }
+
+    // 调用 WriteImportReportToLogFile 将总览信息和详细日志写入日志文件
+    err = WriteImportReportToLogFile("import_report.log", resultsMap, tableInfos)
+    if err != nil {
+        logger.Printf("写入导入报告失败:%v\n", err)
+    } else {
+        logger.Println("导入报告已成功写入 import_report.log")
     }
 }

-// processCSVFiles 处理一批 CSV 文件
-func processCSVFiles(
-    files []string,
+// 处理单个表的函数
+func processTable(
+    dbName string,
+    dirPath string,
+    tableName string,
+    tableInfo models.TableInfo,
     delimiter rune,
+    pageSize int,
     mysqeExec db_executor.DBExecutor,
-    tableName string,
-    tableResult *TableResult,
-) error {
-    for _, filePath := range files {
-        logMsg := fmt.Sprintf("开始读入文件: %s", filePath)
-        fmt.Println(logMsg)
-        tableResult.Logs = append(tableResult.Logs, logMsg)
+    result *TableResult,
+    logger *log.Logger,
+) TableResult {
+    logMessage(result, fmt.Sprintf("开始处理表 %s 的数据\n", tableName), logger)

-        // 读取整个 CSV 文件到内存中
-        headers, records, err := ReadEntireCSV(filePath, delimiter)
-        if err != nil {
-            tableResult.ErrorCount++
-            logMsg = fmt.Sprintf("无法读取 CSV 文件 %s: %v", filePath, err)
-            fmt.Println(logMsg)
-            tableResult.Logs = append(tableResult.Logs, logMsg)
-            continue
-        }
+    // 记录开始时间
+    startTime := time.Now()
+
+    // 构建表对应的目录路径
+    tableDir := filepath.Join(dirPath, tableName)
+    logMessage(result, fmt.Sprintf("表 %s 的目录路径为:%s\n", tableName, tableDir), logger)

-        // 更新总行数和平均行大小
-        tableResult.TotalRows += len(records)
-        if len(records) > 0 {
-            fileInfo, statErr := os.Stat(filePath)
-            if statErr == nil && fileInfo.Size() > 0 {
-                rowSize := fileInfo.Size() / int64(len(records))
-                // 计算平均行大小
-                if tableResult.AverageRowSize == 0 {
-                    tableResult.AverageRowSize = rowSize
+    // 获取该表目录下的所有 CSV 文件
+    files, err := ioutil.ReadDir(tableDir)
+    if err != nil {
+        result.Success = false
+        result.Error = err
+        logMsg := fmt.Sprintf("读取目录失败:%v\n", err)
+        logMessage(result, logMsg, logger)
+        return *result
+    }
+
+    // 用于统计行大小的变量
+    var totalRowSize int64 = 0
+
+    // 遍历所有 CSV 文件,启动协程处理
+    var fileWg sync.WaitGroup
+
+    // 控制最大并发数
+    maxGoroutines := 5
+    semaphore := make(chan struct{}, maxGoroutines)
+
+    for _, file := range files {
+        if !file.IsDir() && strings.HasPrefix(file.Name(), tableName+"_") && strings.HasSuffix(file.Name(), ".csv") {
+            // 满足条件的文件
+            filePath := filepath.Join(tableDir, file.Name())
+            result.GeneratedFiles++
+            fileWg.Add(1)
+            result.GoroutineCount++
+
+            logMessage(result, fmt.Sprintf("发现文件:%s,启动协程进行处理\n", filePath), logger)
+
+            go func(filePath string) {
+                defer fileWg.Done()
+                semaphore <- struct{}{}        // 获取信号量
+                defer func() { <-semaphore }() // 释放信号量
+
+                logMessage(result, fmt.Sprintf("开始处理文件:%s\n", filePath), logger)
+
+                // 处理单个 CSV 文件
+                err := processCSVFile(filePath, tableName, delimiter, pageSize, mysqeExec, result, &totalRowSize, logger)
+                if err != nil {
+                    result.ErrorCount++
+                    logMsg := fmt.Sprintf("处理文件 %s 失败:%v\n", filePath, err)
+                    logMessage(result, logMsg, logger)
                 } else {
-                    tableResult.AverageRowSize = (tableResult.AverageRowSize + rowSize) / 2
+                    logMessage(result, fmt.Sprintf("文件 %s 处理成功\n", filePath), logger)
                 }
-            }
+            }(filePath)
         }
+    }

-        // 插入数据到数据库
-        err = mysqeExec.InsertRecordsToDB(tableName, headers, records)
-        if err != nil {
-            tableResult.ErrorCount++
-            logMsg = fmt.Sprintf("无法将记录插入数据库: %v", err)
-            fmt.Println(logMsg)
-            tableResult.Logs = append(tableResult.Logs, logMsg)
-            continue
-        }
+    // 等待所有文件处理完毕
+    fileWg.Wait()

-        logMsg = fmt.Sprintf("完成导入文件: %s,共插入 %d 行数据。", filePath, len(records))
-        fmt.Println(logMsg)
-        tableResult.Logs = append(tableResult.Logs, logMsg)
+    // 计算统计信息
+    result.ExportDuration = time.Since(startTime)
+    if result.TotalRows > 0 {
+        result.AverageRowSize = totalRowSize / int64(result.TotalRows)
     }
-    return nil
+
+    logMessage(result, fmt.Sprintf("表 %s 的数据处理完成,耗时:%v\n", tableName, result.ExportDuration), logger)
+
+    return *result
 }

-// ReadEntireCSV 读取整个 CSV 文件到内存中
-func ReadEntireCSV(filePath string, delimiter rune) ([]string, [][]string, error) {
+// 处理单个 CSV 文件的函数
+func processCSVFile(
+    filePath string,
+    tableName string,
+    delimiter rune,
+    pageSize int,
+    mysqeExec db_executor.DBExecutor,
+    result *TableResult,
+    totalRowSize *int64,
+    logger *log.Logger,
+) error {
     // 打开 CSV 文件
     file, err := os.Open(filePath)
     if err != nil {
-        return nil, nil, fmt.Errorf("无法打开文件: %v", err)
+        return fmt.Errorf("打开文件失败:%v", err)
     }
     defer file.Close()

-    // 创建 CSV Reader,设置分隔符
+    logMessage(result, fmt.Sprintf("打开文件成功:%s\n", filePath), logger)
+
+    // 获取文件大小,更新表的容量
+    fileInfo, err := file.Stat()
+    if err == nil {
+        result.TableSize += fileInfo.Size()
+        logMessage(result, fmt.Sprintf("文件大小:%d 字节,累计表大小:%d 字节\n", fileInfo.Size(), result.TableSize), logger)
+    }
+
+    // 创建 CSV Reader
     reader := csv.NewReader(file)
     reader.Comma = delimiter

-    // 读取所有数据
-    records, err := reader.ReadAll()
+    // 读取表头
+    headers, err := reader.Read()
     if err != nil {
-        return nil, nil, fmt.Errorf("无法读取 CSV 文件: %v", err)
+        return fmt.Errorf("读取表头失败:%v", err)
     }
+    logMessage(result, fmt.Sprintf("读取表头成功:%v\n", headers), logger)
+
+    // 初始化记录批次
+    var recordsBatch [][]string
+
+    for {
+        // 读取一行记录
+        record, err := reader.Read()
+        if err == io.EOF {
+            // 文件读取完毕,插入剩余的记录
+            if len(recordsBatch) > 0 {
+                logMessage(result, fmt.Sprintf("文件读取完毕,插入剩余的 %d 条记录到数据库\n", len(recordsBatch)), logger)
+                err = mysqeExec.InsertRecordsToDB(tableName, headers, recordsBatch)
+                if err != nil {
+                    return fmt.Errorf("插入数据库失败:%v", err)
+                }
+                // 更新总行数
+                result.TotalRows += len(recordsBatch)
+                // 计算行大小
+                for _, rec := range recordsBatch {
+                    *totalRowSize += int64(len(strings.Join(rec, "")))
+                }
+                logMessage(result, fmt.Sprintf("成功插入剩余记录,累计总行数:%d\n", result.TotalRows), logger)
+            }
+            break
+        }
+        if err != nil {
+            return fmt.Errorf("读取记录失败:%v", err)
+        }

-    if len(records) == 0 {
-        return nil, nil, fmt.Errorf("CSV 文件为空")
-    }
-
-    // 第一个记录是表头
-    headers := records[0]
-    dataRecords := records[1:] // 跳过表头
-
-    return headers, dataRecords, nil
-}
+        // 添加到批次
+        recordsBatch = append(recordsBatch, record)

-// GetCSVFiles 获取指定目录下的所有 CSV 文件路径
-func GetCSVFiles(dirPath string) ([]string, error) {
-    var files []string
-    // 遍历目录,查找所有 .csv 文件
-    entries, err := os.ReadDir(dirPath)
-    if err != nil {
-        return nil, fmt.Errorf("无法读取目录 %s: %v", dirPath, err)
-    }
-    for _, entry := range entries {
-        if !entry.IsDir() && filepath.Ext(entry.Name()) == ".csv" {
-            files = append(files, filepath.Join(dirPath, entry.Name()))
+        // 如果达到批次大小,插入数据库
+        if len(recordsBatch) >= pageSize {
+            logMessage(result, fmt.Sprintf("达到批次大小 %d,插入记录到数据库\n", pageSize), logger)
+            err = mysqeExec.InsertRecordsToDB(tableName, headers, recordsBatch)
+            if err != nil {
+                return fmt.Errorf("插入数据库失败:%v", err)
+            }
+            // 更新总行数
+            result.TotalRows += len(recordsBatch)
+            // 计算行大小
+            for _, rec := range recordsBatch {
+                *totalRowSize += int64(len(strings.Join(rec, "")))
+            }
+            logMessage(result, fmt.Sprintf("成功插入记录,累计总行数:%d\n", result.TotalRows), logger)
+            // 清空批次
+            recordsBatch = recordsBatch[:0]
         }
     }
-    return files, nil
-}

-// getAvailableMemory 获取可用的系统内存
-func getAvailableMemory() int64 {
-    var m runtime.MemStats
-    runtime.ReadMemStats(&m)
-    // 假设使用可用内存的 80%
-    availableMemory := int64(m.Sys - m.Alloc)
-    return availableMemory * 80 / 100
+    return nil
 }
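
Note: the added code in this diff calls a logMessage helper and fills a TableResult struct, neither of which is defined in the hunks above. The following is a minimal sketch of what they might look like, inferred only from how they are used in the new lines; the actual definitions elsewhere in the package may differ.

package services

import (
    "log"
    "time"
)

// TableResult summarizes the import of one table.
// Field names and types are inferred from the initializer in ReaderMain
// and the assignments in processTable/processCSVFile above.
type TableResult struct {
    TableName      string        // table being imported
    Success        bool          // overall success flag
    Error          error         // fatal error, if any
    ErrorCount     int           // number of per-file failures
    GeneratedFiles int           // CSV files found for the table
    GoroutineCount int           // goroutines started for the table
    TotalRows      int           // rows inserted into the database
    AverageRowSize int64         // average row size in bytes
    TableSize      int64         // accumulated size of the CSV files
    ExportDuration time.Duration // wall-clock time spent on the table
    Logs           []string      // per-table log messages
}

// logMessage is a hypothetical helper matching calls such as
// logMessage(result, msg, logger): it records the message in the
// per-table log and also writes it through the shared logger.
func logMessage(result *TableResult, msg string, logger *log.Logger) {
    result.Logs = append(result.Logs, msg)
    logger.Print(msg)
}

Keeping both a per-table Logs slice and a shared log.Logger mirrors what the new code does: the logger feeds process.log and stdout as they happen, while the accumulated Logs travel with each TableResult into the final import report.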