Forráskód Böngészése

feat:导出csv功能完成,导入功能未完成

GTong 8 hónapja
commit
1a5c1875df

+ 1 - 0
.gitignore

@@ -0,0 +1 @@
+GTool/

+ 40 - 0
cmd/export.go

@@ -0,0 +1,40 @@
+package cmd
+
+import (
+	"fmt"
+	"os"
+	"xg_fetl/internal/controllers"
+
+	"github.com/spf13/cobra"
+)
+
+// 定义导出子命令
+var exportCmd = &cobra.Command{
+	Use:   "exp",
+	Short: "导出数据模式",
+	Run: func(cmd *cobra.Command, args []string) {
+		// 定义参数变量
+		outputDir, _ := cmd.Flags().GetString("output")
+		tableName, _ := cmd.Flags().GetString("table")
+
+		// 检查参数
+		if tableName == "" {
+			fmt.Println("错误:必须指定表对象名 (-t)")
+			cmd.Usage()
+			os.Exit(1)
+		}
+
+		// 打印接收到的参数
+		fmt.Printf("执行模式: 导出\n")
+		fmt.Printf("输出目录: %s\n", outputDir)
+		fmt.Printf("表对象名: %s\n", tableName)
+
+		controllers.ExportController(outputDir)
+	},
+}
+
+// 初始化导出子命令的标志
+func init() {
+	exportCmd.Flags().StringP("output", "o", ".", "输出目录 (默认是当前目录)")
+	exportCmd.Flags().StringP("table", "t", "", "表对象名,格式为 模式名.表名 (例如: test.t1)")
+}

+ 45 - 0
cmd/import.go

@@ -0,0 +1,45 @@
+package cmd
+
+import (
+	"fmt"
+	"os"
+	"xg_fetl/internal/controllers"
+
+	"github.com/spf13/cobra"
+)
+
+// 定义导入子命令
+var importCmd = &cobra.Command{
+	Use:   "imp",
+	Short: "导入数据模式",
+	Run: func(cmd *cobra.Command, args []string) {
+		// 定义参数变量
+		inputPath, _ := cmd.Flags().GetString("input")
+		tableName, _ := cmd.Flags().GetString("table")
+
+		// 检查参数
+		if inputPath == "" {
+			fmt.Println("错误:必须指定文件名或文件目录 (-i)")
+			cmd.Usage()
+			os.Exit(1)
+		}
+
+		if tableName == "" {
+			fmt.Println("错误:必须指定表名 (-t)")
+			cmd.Usage()
+			os.Exit(1)
+		}
+
+		// 打印接收到的参数
+		fmt.Printf("执行模式: 导入\n")
+		fmt.Printf("输入路径: %s\n", inputPath)
+		fmt.Printf("表名: %s\n", tableName)
+		controllers.ImportController(inputPath)
+	},
+}
+
+// 初始化导入子命令的标志
+func init() {
+	importCmd.Flags().StringP("input", "i", "", "输入文件或目录路径")
+	importCmd.Flags().StringP("table", "t", "", "目标表名 (例如: t1)")
+}

+ 28 - 0
cmd/root.go

@@ -0,0 +1,28 @@
+package cmd
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/spf13/cobra"
+)
+
// rootCmd is the top-level command; the exp/imp subcommands are
// attached to it in init().
var rootCmd = &cobra.Command{
	Use:   "app",
	Short: "数据导入/导出工具",
}
+
+// Execute 函数用于执行根命令
+func Execute() {
+	if err := rootCmd.Execute(); err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+}
+
+func init() {
+	// 在初始化时添加导入和导出的子命令
+	rootCmd.AddCommand(exportCmd)
+	rootCmd.AddCommand(importCmd)
+}

+ 24 - 0
config.toml

@@ -0,0 +1,24 @@
+[import]
+db_type = "mysql"
+dbname = "test1"
+user = "root"
+password = "123456"
+ip = "127.0.0.1"
+port = 3306
+dir_path = "output"
+delimiter = ","
+page_size = 10000
+filename_pattern = "^(.*)_[0-9]+\\.csv$"
+
+[export]
+db_type = "mysql"
+dbname = "gtong"
+user = "root"
+password = "123456"
+ip = "127.0.0.1"
+port = 3306
+table_name = "users"
+base_file_path = "./output/"
+delimiter = ","
+max_file_size = 1024
+page_size = 10000

+ 41 - 0
go.mod

@@ -0,0 +1,41 @@
+module xg_fetl
+
+go 1.22.2
+
+require (
+	github.com/go-sql-driver/mysql v1.8.1
+	github.com/spf13/cobra v1.8.1
+	github.com/spf13/viper v1.19.0
+	xorm.io/xorm v1.3.9
+)
+
+require (
+	filippo.io/edwards25519 v1.1.0 // indirect
+	github.com/fsnotify/fsnotify v1.7.0 // indirect
+	github.com/goccy/go-json v0.8.1 // indirect
+	github.com/golang/snappy v0.0.4 // indirect
+	github.com/hashicorp/hcl v1.0.0 // indirect
+	github.com/inconshreveable/mousetrap v1.1.0 // indirect
+	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/magiconair/properties v1.8.7 // indirect
+	github.com/mitchellh/mapstructure v1.5.0 // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/pelletier/go-toml/v2 v2.2.2 // indirect
+	github.com/sagikazarmark/locafero v0.4.0 // indirect
+	github.com/sagikazarmark/slog-shim v0.1.0 // indirect
+	github.com/sourcegraph/conc v0.3.0 // indirect
+	github.com/spf13/afero v1.11.0 // indirect
+	github.com/spf13/cast v1.6.0 // indirect
+	github.com/spf13/pflag v1.0.5 // indirect
+	github.com/subosito/gotenv v1.6.0 // indirect
+	github.com/syndtr/goleveldb v1.0.0 // indirect
+	go.uber.org/atomic v1.9.0 // indirect
+	go.uber.org/multierr v1.9.0 // indirect
+	golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
+	golang.org/x/sys v0.18.0 // indirect
+	golang.org/x/text v0.14.0 // indirect
+	gopkg.in/ini.v1 v1.67.0 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+	xorm.io/builder v0.3.11-0.20220531020008-1bd24a7dc978 // indirect
+)

+ 159 - 0
go.sum

@@ -0,0 +1,159 @@
+filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
+filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
+gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a h1:lSA0F4e9A2NcQSqGqTOXqu2aRi/XEQxDCBwM8yJtE6s=
+gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a/go.mod h1:EXuID2Zs0pAQhH8yz+DNjUbjppKQzKFAn28TMYPB6IU=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
+github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
+github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
+github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
+github.com/goccy/go-json v0.8.1 h1:4/Wjm0JIJaTDm8K1KcGrLHJoa8EsJ13YWeX+6Kfq6uI=
+github.com/goccy/go-json v0.8.1/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
+github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
+github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
+github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
+github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
+github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
+github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
+github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
+github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
+github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
+github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
+github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
+github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
+github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
+github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
+github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
+github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
+github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
+github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=
+github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
+github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
+github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
+github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
+go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
+go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
+go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
+golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
+golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
+golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
+golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
+golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
+golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ=
+golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
+gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI=
+lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
+modernc.org/cc/v3 v3.40.0 h1:P3g79IUS/93SYhtoeaHW+kRCIrYaxJ27MFPv+7kaTOw=
+modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0=
+modernc.org/ccgo/v3 v3.16.13 h1:Mkgdzl46i5F/CNR/Kj80Ri59hC8TKAhZrYSaqvkwzUw=
+modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY=
+modernc.org/libc v1.22.2 h1:4U7v51GyhlWqQmwCHj28Rdq2Yzwk55ovjFrdPjs8Hb0=
+modernc.org/libc v1.22.2/go.mod h1:uvQavJ1pZ0hIoC/jfqNoMLURIMhKzINIWypNM17puug=
+modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ=
+modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
+modernc.org/memory v1.4.0 h1:crykUfNSnMAXaOJnnxcSzbUGMqkLWjklJKkBK2nwZwk=
+modernc.org/memory v1.4.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
+modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
+modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
+modernc.org/sqlite v1.20.4 h1:J8+m2trkN+KKoE7jglyHYYYiaq5xmz2HoHJIiBlRzbE=
+modernc.org/sqlite v1.20.4/go.mod h1:zKcGyrICaxNTMEHSr1HQ2GUraP0j+845GYw37+EyT6A=
+modernc.org/strutil v1.1.3 h1:fNMm+oJklMGYfU9Ylcywl0CO5O6nTfaowNsh2wpPjzY=
+modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw=
+modernc.org/token v1.0.1 h1:A3qvTqOwexpfZZeyI0FeGPDlSWX5pjZu9hF4lU+EKWg=
+modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
+xorm.io/builder v0.3.11-0.20220531020008-1bd24a7dc978 h1:bvLlAPW1ZMTWA32LuZMBEGHAUOcATZjzHcotf3SWweM=
+xorm.io/builder v0.3.11-0.20220531020008-1bd24a7dc978/go.mod h1:aUW0S9eb9VCaPohFCH3j7czOx1PMW3i1HrSzbLYGBSE=
+xorm.io/xorm v1.3.9 h1:TUovzS0ko+IQ1XnNLfs5dqK1cJl1H5uHpWbWqAQ04nU=
+xorm.io/xorm v1.3.9/go.mod h1:LsCCffeeYp63ssk0pKumP6l96WZcHix7ChpurcLNuMw=

+ 116 - 0
internal/config/config.go

@@ -0,0 +1,116 @@
+package config
+
+import (
+	"fmt"
+	"regexp"
+
+	"github.com/spf13/viper"
+)
+
/*
Config is the root configuration object, bundling the import and
export settings loaded from config.toml.
*/
type Config struct {
	ImportConfig ImportConfig // import-side settings
	ExportConfig ExportConfig // export-side settings
}

/*
ImportConfig holds the importer settings: the database connection,
the directory containing the files to load, the CSV delimiter, the
page size used when reading in batches, and the regular expression
that recognises importable file names.
*/
type ImportConfig struct {
	DBConfig        DBConfig       // database connection settings
	Delimiter       rune           // field delimiter of the import files
	DirPath         string         // directory containing the import files
	PageSize        int            // number of rows read per page
	FilenamePattern *regexp.Regexp // pattern an importable file name must match
}

/*
ExportConfig holds the exporter settings: the database connection,
the table to export, the base output path, the CSV delimiter, the
per-file size cap, and the page size used when reading in batches.
*/
type ExportConfig struct {
	DBConfig     DBConfig // database connection settings
	TableName    string   // name of the table to export
	BaseFilePath string   // base path for the exported files
	Delimiter    rune     // CSV field delimiter
	MaxFileSize  int64    // maximum size of a single output file (bytes)
	PageSize     int      // number of rows read per page
}

/*
DBConfig describes a database connection: type, name, credentials,
host address and port.
*/
type DBConfig struct {
	DBType   string // database type (e.g. MySQL, PostgreSQL)
	DBName   string // database name
	User     string // database user name
	Password string // database password
	IP       string // database IP address
	Port     int    // database port number
}
+
+func LoadConfig() (*Config, error) {
+	viper.SetConfigName("config") // 配置文件名(不带扩展名)
+	viper.SetConfigType("toml")   // 配置文件类型
+	viper.AddConfigPath(".")      // 查找配置文件的路径
+
+	if err := viper.ReadInConfig(); err != nil {
+		return nil, fmt.Errorf("error reading config file: %v", err)
+	}
+
+	// 解析 Import 配置
+	importPattern, err := regexp.Compile(viper.GetString("import.filename_pattern"))
+	if err != nil {
+		return nil, fmt.Errorf("error compiling filename pattern: %v", err)
+	}
+	importConfig := ImportConfig{
+		DBConfig: DBConfig{
+			DBType:   viper.GetString("import.db_type"),
+			DBName:   viper.GetString("import.dbname"),
+			User:     viper.GetString("import.user"),
+			Password: viper.GetString("import.password"),
+			IP:       viper.GetString("import.ip"),
+			Port:     viper.GetInt("import.port"),
+		},
+		DirPath:         viper.GetString("import.dir_path"),
+		Delimiter:       rune(viper.GetString("import.delimiter")[0]),
+		PageSize:        viper.GetInt("import.page_size"),
+		FilenamePattern: importPattern,
+	}
+
+	// 解析 Export 配置
+	exportConfig := ExportConfig{
+		DBConfig: DBConfig{
+			DBType:   viper.GetString("export.db_type"),
+			DBName:   viper.GetString("export.dbname"),
+			User:     viper.GetString("export.user"),
+			Password: viper.GetString("export.password"),
+			IP:       viper.GetString("export.ip"),
+			Port:     viper.GetInt("export.port"),
+		},
+		TableName:    viper.GetString("export.table_name"),
+		BaseFilePath: viper.GetString("export.base_file_path"),
+		Delimiter:    rune(viper.GetString("export.delimiter")[0]),
+		MaxFileSize:  viper.GetInt64("export.max_file_size"),
+		PageSize:     viper.GetInt("export.page_size"),
+	}
+
+	return &Config{
+		ImportConfig: importConfig,
+		ExportConfig: exportConfig,
+	}, nil
+}
+
+func (dbConfig *DBConfig) GetDSN() string {
+	switch dbConfig.DBType {
+	case "mysql":
+		return fmt.Sprintf("%s:%s@tcp(%s:%d)/%s", dbConfig.User, dbConfig.Password, dbConfig.IP, dbConfig.Port, dbConfig.DBName)
+	case "xugu":
+		return fmt.Sprintf("%s:%s@tcp(%s:%d)/%s", dbConfig.User, dbConfig.Password, dbConfig.IP, dbConfig.Port, dbConfig.DBName)
+	}
+	return ""
+}

+ 187 - 0
internal/controllers/export_controller.go

@@ -0,0 +1,187 @@
+package controllers
+
+import (
+	"fmt"
+	"os"
+	"xg_fetl/internal/db_executor"
+	"xg_fetl/internal/global"
+	"xg_fetl/internal/models"
+	"xg_fetl/internal/services"
+	"xg_fetl/internal/utils"
+)
+
+func ExportController(outputDir string) {
+
+	dbType := global.Cfg.ExportConfig.DBConfig.DBType
+	dsn := global.Cfg.ExportConfig.DBConfig.GetDSN()
+
+	dbName := global.Cfg.ExportConfig.DBConfig.DBName
+	dbIp := global.Cfg.ExportConfig.DBConfig.IP
+	// baseFilePath := global.Cfg.ExportConfig.BaseFilePath
+	delimiter := global.Cfg.ExportConfig.Delimiter
+	maxFileSize := global.Cfg.ExportConfig.MaxFileSize
+	pageSize := global.Cfg.ExportConfig.PageSize
+
+	// !创建数据库链接
+	executor, err := db_executor.NewDBExecutor(dbType, dsn)
+	if err != nil {
+		fmt.Println(fmt.Errorf("create db executor failed, err: %v", err))
+		panic(err)
+	}
+	defer executor.Close()
+	// ! 获取数据库元信息
+	dbInfo, err := services.GetDBInfoService(dbName, executor)
+	if err != nil {
+		fmt.Println(fmt.Errorf("get db info failed, err: %v", err))
+		panic(err)
+	}
+
+	// !创建存放路径
+	outputDir = "./GTool" + "/" + dbName + "_" + dbIp
+	if err := utils.CreateDirectory(outputDir); err != nil {
+		fmt.Println(fmt.Errorf("创建文件夹失败,可能已存在同名文件夹 %s, err: %v", outputDir, err))
+		return
+	}
+
+	PrintDatabaseInfo(dbInfo)
+	//存放文件夹名字为库名
+	dbInfoFile := outputDir + "/" + "db_info.txt"
+	WriteDatabaseInfoToFile(dbInfo, dbInfoFile)
+
+	// !导出
+	// // 创建一个空的 tableNames 数组,用于存放表名
+	// tableNames := make([]string, 0, len(dbInfo.Tables)+len(dbInfo.LobTables))
+	// // 将 tableInfoMap 中的表名添加到 tableNames 中
+	// for tableName := range dbInfo.Tables {
+	// 	tableNames = append(tableNames, tableName)
+	// }
+	// // 将 lobTableInfoMap 中的表名添加到 tableNames 中
+	// for tableName := range dbInfo.LobTables {
+	// 	tableNames = append(tableNames, tableName)
+	// }
+	// 创建一个新的 map,将普通表和 LOB 表的信息都放进去
+	allTables := make(map[string]models.TableInfo, len(dbInfo.Tables)+len(dbInfo.LobTables))
+
+	// 将 tableInfoMap 中的表信息添加到 allTables 中
+	for tableName, tableInfo := range dbInfo.Tables {
+		allTables[tableName] = tableInfo
+	}
+
+	// 将 lobTableInfoMap 中的表信息添加到 allTables 中
+	for tableName, tableInfo := range dbInfo.LobTables {
+		allTables[tableName] = tableInfo
+	}
+	services.WriteMain(executor, allTables, outputDir, delimiter, maxFileSize, pageSize)
+}
+
+// 打印 DatabaseInfo 的函数
+func PrintDatabaseInfo(dbInfo *models.DatabaseInfo) {
+	if dbInfo == nil {
+		fmt.Println("数据库信息为空")
+		return
+	}
+
+	fmt.Printf("数据库总大小: %.2f MB\n", dbInfo.SizeMB)
+	fmt.Printf("表的数量: %d\n\n", dbInfo.TableCount)
+
+	fmt.Println("普通表信息:")
+	for _, table := range dbInfo.Tables {
+		fmt.Printf("表名: %s\n", table.Name)
+		fmt.Printf("大小: %.2f MB\n", table.Size)
+		fmt.Printf("行数: %d\n", table.Count)
+		fmt.Printf("DDL:\n%s\n\n", table.DDL)
+	}
+
+	fmt.Println("LOB 表信息:")
+	for _, table := range dbInfo.LobTables {
+		fmt.Printf("表名: %s\n", table.Name)
+		fmt.Printf("大小: %.2f MB\n", table.Size)
+		fmt.Printf("行数: %d\n", table.Count)
+		fmt.Printf("DDL:\n%s\n\n", table.DDL)
+	}
+
+	fmt.Println("存储过程和函数:")
+	for _, function := range dbInfo.Functions {
+		fmt.Printf("名称: %s\n", function.Name)
+		fmt.Printf("类型: %s\n", function.Type)
+		fmt.Printf("定义:\n%s\n\n", function.Definition)
+	}
+
+	fmt.Println("触发器:")
+	for _, trigger := range dbInfo.Triggers {
+		fmt.Printf("名称: %s\n", trigger.Name)
+		fmt.Printf("事件: %s\n", trigger.Event)
+		fmt.Printf("表: %s\n", trigger.Table)
+		fmt.Printf("时机: %s\n", trigger.Timing)
+		fmt.Printf("语句:\n%s\n\n", trigger.Statement)
+	}
+}
+
+// WriteDatabaseInfoToFile 将 DatabaseInfo 写入文件,并格式化划分信息
+func WriteDatabaseInfoToFile(dbInfo *models.DatabaseInfo, filePath string) error {
+	if dbInfo == nil {
+		return fmt.Errorf("数据库信息为空")
+	}
+
+	// 打开或创建文件
+	file, err := os.Create(filePath)
+	if err != nil {
+		return fmt.Errorf("无法创建文件: %v", err)
+	}
+	defer file.Close()
+
+	// 计算表的数量(普通表和 LOB 表)
+	normalTableCount := len(dbInfo.Tables)
+	lobTableCount := len(dbInfo.LobTables)
+	functionCount := len(dbInfo.Functions)
+	triggerCount := len(dbInfo.Triggers)
+
+	// 写入数据库总大小、表数量以及各类表和对象的数量
+	fmt.Fprintf(file, "--======== 数据库信息 ========\n")
+	fmt.Fprintf(file, "数据库总大小: %.2f MB\n", dbInfo.SizeMB)
+	fmt.Fprintf(file, "表的总数量: %d\n", dbInfo.TableCount)
+	fmt.Fprintf(file, "普通表的数量: %d\n", normalTableCount)
+	fmt.Fprintf(file, "LOB 表的数量: %d\n", lobTableCount)
+	fmt.Fprintf(file, "存储过程和函数的数量: %d\n", functionCount)
+	fmt.Fprintf(file, "触发器的数量: %d\n\n", triggerCount)
+
+	// 写入普通表信息
+	fmt.Fprintf(file, "--======== 普通表信息 ========\n")
+	for _, table := range dbInfo.Tables {
+		fmt.Fprintf(file, "-------- 表名: %s --------\n", table.Name)
+		fmt.Fprintf(file, "大小: %.2f MB\n", table.Size)
+		fmt.Fprintf(file, "行数: %d\n", table.Count)
+		fmt.Fprintf(file, "DDL:\n%s\n\n", table.DDL)
+	}
+
+	// 写入 LOB 表信息
+	fmt.Fprintf(file, "--======== LOB 表信息 ========\n")
+	for _, table := range dbInfo.LobTables {
+		fmt.Fprintf(file, "-------- 表名: %s --------\n", table.Name)
+		fmt.Fprintf(file, "大小: %.2f MB\n", table.Size)
+		fmt.Fprintf(file, "行数: %d\n", table.Count)
+		fmt.Fprintf(file, "DDL:\n%s\n\n", table.DDL)
+	}
+
+	// 写入存储过程和函数信息
+	fmt.Fprintf(file, "--======== 存储过程和函数 ========\n")
+	for _, function := range dbInfo.Functions {
+		fmt.Fprintf(file, "-------- 名称: %s --------\n", function.Name)
+		fmt.Fprintf(file, "类型: %s\n", function.Type)
+		fmt.Fprintf(file, "定义:\n%s\n\n", function.Definition)
+	}
+
+	// 写入触发器信息
+	fmt.Fprintf(file, "--======== 触发器 ========\n")
+	for _, trigger := range dbInfo.Triggers {
+		fmt.Fprintf(file, "-------- 名称: %s --------\n", trigger.Name)
+		fmt.Fprintf(file, "事件: %s\n", trigger.Event)
+		fmt.Fprintf(file, "表: %s\n", trigger.Table)
+		fmt.Fprintf(file, "时机: %s\n", trigger.Timing)
+		fmt.Fprintf(file, "语句:\n%s\n\n", trigger.Statement)
+	}
+
+	fmt.Fprintf(file, "--============================\n")
+
+	return nil
+}

+ 52 - 0
internal/controllers/import_controller.go

@@ -0,0 +1,52 @@
+package controllers
+
+import (
+	"fmt"
+	"xg_fetl/internal/db_executor"
+	"xg_fetl/internal/global"
+	"xg_fetl/internal/models"
+	"xg_fetl/internal/services"
+)
+
+func ImportController(outputDir string) {
+
+	dbType := global.Cfg.ImportConfig.DBConfig.DBType
+	dsn := global.Cfg.ImportConfig.DBConfig.GetDSN()
+
+	dbName := global.Cfg.ImportConfig.DBConfig.DBName
+	//dbIp := global.Cfg.ImportConfig.DBConfig.IP
+	// baseFilePath := global.Cfg.ExportConfig.BaseFilePath
+	delimiter := global.Cfg.ImportConfig.Delimiter
+
+	pageSize := global.Cfg.ImportConfig.PageSize
+
+	// !创建数据库链接
+	executor, err := db_executor.NewDBExecutor(dbType, dsn)
+	if err != nil {
+		fmt.Println(fmt.Errorf("create db executor failed, err: %v", err))
+		panic(err)
+	}
+	defer executor.Close()
+	// ! 获取数据库元信息
+	dbInfo, err := services.GetDBInfoService(dbName, executor)
+	if err != nil {
+		fmt.Println(fmt.Errorf("get db info failed, err: %v", err))
+		panic(err)
+	}
+
+	// 创建一个新的 map,将普通表和 LOB 表的信息都放进去
+	allTables := make(map[string]models.TableInfo, len(dbInfo.Tables)+len(dbInfo.LobTables))
+
+	// 将 tableInfoMap 中的表信息添加到 allTables 中
+	for tableName, tableInfo := range dbInfo.Tables {
+		allTables[tableName] = tableInfo
+	}
+
+	// 将 lobTableInfoMap 中的表信息添加到 allTables 中
+	for tableName, tableInfo := range dbInfo.LobTables {
+		allTables[tableName] = tableInfo
+	}
+
+	services.ReaderMain(dbName, outputDir, allTables, delimiter, pageSize, executor)
+
+}

+ 85 - 0
internal/db_executor/db_executor.go

@@ -0,0 +1,85 @@
+package db_executor
+
+import (
+	"fmt"
+	"xg_fetl/internal/models"
+
+	_ "github.com/go-sql-driver/mysql"
+
+	"xorm.io/xorm"
+)
+
// DBExecutor is the database-agnostic interface the import/export
// pipeline works against; NewDBExecutor selects the concrete
// implementation by database-type name.
type DBExecutor interface {
	InitDB(dsn string) error
	QueryRowsAsMaps(sqlStr string) ([]map[string]string, error)
	GetColumnsAndRows(query string) ([]string, []map[string]interface{}, error)
	Close() error

	// Metadata and data-movement operations shared by all backends.
	GetDatabaseInfo(database string) (*models.DatabaseInfo, error)
	ReadTableFromDBWithPagination(tableName string, offset int, pageSize int) ([]string, [][]string, error)
	InsertRecordsToDB(tableName string, headers []string, records [][]string) error
}
+
// BaseExecutor carries the shared xorm engine and implements the
// backend-independent parts of DBExecutor.
type BaseExecutor struct {
	Engine *xorm.Engine // underlying ORM engine / connection pool
}

// Close releases the underlying engine's connection pool.
func (b *BaseExecutor) Close() error {
	return b.Engine.Close()
}
+
// InitDB opens an xorm engine for dsn and stores it on the executor.
// NOTE(review): the driver name is hard-coded to "mysql" even though
// this lives on the shared base type — confirm whether non-MySQL
// backends are expected to override this method.
func (b *BaseExecutor) InitDB(dsn string) error {
	engine, err := xorm.NewEngine("mysql", dsn)
	if err != nil {
		return fmt.Errorf("failed to initialize DB: %v", err)
	}
	b.Engine = engine
	return nil
}
+
+// QueryRowsAsMaps 通用的查询方法,返回每行结果的映射
+func (b *BaseExecutor) QueryRowsAsMaps(sqlStr string) ([]map[string]string, error) {
+	results, err := b.Engine.QueryString(sqlStr)
+	if err != nil {
+		return nil, err
+	}
+	return results, nil // xorm 已经将每行结果映射为 map[string]interface{}
+}
+
// GetColumnsAndRows runs query and returns the column names together
// with the full result set.
// NOTE(review): the column names are collected by ranging over the
// first row's map, so their order is nondeterministic across calls;
// callers needing a stable column order must sort. Also note that a
// query returning zero rows is reported as an error here — confirm
// callers expect that.
func (b *BaseExecutor) GetColumnsAndRows(query string) ([]string, []map[string]interface{}, error) {
	rows, err := b.Engine.QueryInterface(query)
	if err != nil {
		return nil, nil, err
	}

	// Derive the column names from the first row; with no rows there
	// is nothing to derive them from.
	if len(rows) == 0 {
		return nil, nil, fmt.Errorf("no results found")
	}
	columns := make([]string, 0, len(rows[0]))
	for col := range rows[0] {
		columns = append(columns, col)
	}

	// Return the column names together with the result set.
	return columns, rows, nil
}
+
+// 工厂函数,根据数据库类型创建相应的执行器
+func NewDBExecutor(dbType string, dsn string) (DBExecutor, error) {
+	switch dbType {
+	case "mysql":
+		engine, err := xorm.NewEngine("mysql", dsn)
+		if err != nil {
+			return nil, fmt.Errorf("failed to initialize DB: %v", err)
+		}
+
+		return &MySQLExecutor{BaseExecutor: BaseExecutor{Engine: engine}}, nil
+
+	default:
+		return nil, fmt.Errorf("unsupported database type: %s", dbType)
+	}
+}

+ 306 - 0
internal/db_executor/mysql_executor.go

@@ -0,0 +1,306 @@
+package db_executor
+
import (
	"database/sql"
	"fmt"
	"sort"
	"strconv"
	"strings"
	"time"

	"xg_fetl/internal/models"

	_ "github.com/go-sql-driver/mysql"
	"xorm.io/xorm"
)
+
// MySQLExecutor is the MySQL implementation of DBExecutor; it embeds
// BaseExecutor for the shared behavior.
type MySQLExecutor struct {
	BaseExecutor
}
+
// InitDB initializes the MySQL connection and configures the connection
// pool. It overrides BaseExecutor.InitDB to add the pool settings.
func (m *MySQLExecutor) InitDB(dsn string) error {
	var err error
	m.Engine, err = xorm.NewEngine("mysql", dsn)
	if err != nil {
		return fmt.Errorf("failed to initialize DB: %v", err)
	}
	// Connection-pool parameters.
	m.Engine.SetMaxOpenConns(10)
	m.Engine.SetMaxIdleConns(5)
	m.Engine.SetConnMaxLifetime(time.Hour)
	return nil
}
+
+// GetDatabases 获取所有数据库名称,排除系统数据库
+func (m *MySQLExecutor) GetDatabases(excludeDBs []string) ([]string, error) {
+	var databases []struct {
+		Database string `xorm:"Database"`
+	}
+
+	// 执行 SHOW DATABASES 查询
+	err := m.Engine.SQL("SHOW DATABASES").Find(&databases)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get databases: %v", err)
+	}
+
+	// 获取所有数据库名
+	var allDBs []string
+	for _, db := range databases {
+		allDBs = append(allDBs, db.Database)
+	}
+
+	// 过滤排除的数据库
+	excludeDBsMap := make(map[string]struct{})
+	for _, db := range excludeDBs {
+		excludeDBsMap[db] = struct{}{}
+	}
+
+	var dbs []string
+	for _, db := range allDBs {
+		if _, ok := excludeDBsMap[db]; !ok {
+			dbs = append(dbs, db)
+		}
+	}
+	return dbs, nil
+}
+
// GetEngine exposes the underlying xorm engine for MySQL-specific operations.
// NOTE(review): Go convention would name this Engine(), but renaming would
// break existing callers.
func (m *MySQLExecutor) GetEngine() *xorm.Engine {
	return m.Engine
}
+
+func (m *MySQLExecutor) GetDatabaseInfo(database string) (*models.DatabaseInfo, error) {
+	// 获取表列表
+	tables, err := m.Engine.DBMetas()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get tables: %v", err)
+	}
+
+	// 初始化普通表和 LOB 表信息的映射
+	tableInfoMap := make(map[string]models.TableInfo)    // 普通表
+	lobTableInfoMap := make(map[string]models.TableInfo) // LOB 表
+
+	// 查询数据库的总大小
+	query := `SELECT SUM(data_length + index_length) / 1024 / 1024 AS size_mb
+	          FROM information_schema.tables WHERE table_schema = ?`
+	var size sql.NullFloat64
+	found, err := m.Engine.SQL(query, database).Get(&size)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get database size: %v", err)
+	}
+	if !found {
+		return nil, fmt.Errorf("no size information found for database: %s", database)
+	}
+
+	// 查询每个表的大小
+	query = `SELECT TABLE_NAME, (data_length + index_length) / 1024 / 1024 AS size_mb
+	         FROM information_schema.tables WHERE table_schema = ?`
+	results, err := m.Engine.QueryString(query, database)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get table sizes: %v", err)
+	}
+
+	// 解析每个表的信息
+	for _, row := range results {
+		tableName := row["TABLE_NAME"]
+		tableSizeStr := row["size_mb"]
+		tableSize, _ := strconv.ParseFloat(tableSizeStr, 64)
+
+		// 初始化 TableInfo
+		info := models.TableInfo{
+			Name: tableName,
+			Size: tableSize,
+		}
+
+		// 获取表的 DDL
+		ddlQuery := fmt.Sprintf("SHOW CREATE TABLE `%s`", tableName)
+		ddlResult, err := m.Engine.QueryString(ddlQuery)
+		if err != nil {
+			return nil, fmt.Errorf("failed to get DDL for table %s: %v", tableName, err)
+		}
+		if len(ddlResult) > 0 {
+			info.DDL = ddlResult[0]["Create Table"]
+		}
+
+		// 获取表的行数
+		countQuery := fmt.Sprintf("SELECT COUNT(*) AS row_count FROM `%s`", tableName)
+		countResult, err := m.Engine.QueryString(countQuery)
+		if err != nil {
+			return nil, fmt.Errorf("failed to get row count for table %s: %v", tableName, err)
+		}
+		if len(countResult) > 0 {
+			info.Count, _ = strconv.Atoi(countResult[0]["row_count"])
+		}
+
+		// 检查表是否包含 LOB 类型字段
+		columnQuery := fmt.Sprintf(`SELECT COLUMN_NAME, DATA_TYPE 
+		                            FROM information_schema.columns 
+		                            WHERE table_schema = '%s' AND table_name = '%s'`, database, tableName)
+		columns, err := m.Engine.QueryString(columnQuery)
+		if err != nil {
+			return nil, fmt.Errorf("failed to get columns for table %s: %v", tableName, err)
+		}
+
+		// 遍历列,检查是否有 LOB 类型(TEXT, BLOB 等)
+		isLobTable := false
+		for _, column := range columns {
+			dataType := column["DATA_TYPE"]
+			if dataType == "text" || dataType == "blob" || dataType == "mediumtext" || dataType == "longtext" || dataType == "mediumblob" || dataType == "longblob" {
+				isLobTable = true
+				break
+			}
+		}
+
+		// 将表信息存入对应的映射
+		if isLobTable {
+			lobTableInfoMap[tableName] = info
+		} else {
+			tableInfoMap[tableName] = info
+		}
+	}
+
+	// 确定数据库总大小
+	sizeMB := 0.0
+	if size.Valid {
+		sizeMB = size.Float64
+	}
+
+	//-- 获取存储过程和函数
+	funcsQuery := `SELECT ROUTINE_NAME, ROUTINE_TYPE, ROUTINE_DEFINITION
+  FROM information_schema.ROUTINES
+  WHERE ROUTINE_SCHEMA = ?`
+
+	funcsResults, err := m.Engine.QueryString(funcsQuery, database)
+	if err != nil {
+		return nil, fmt.Errorf("获取存储过程/函数失败: %v", err)
+	}
+
+	var functionInfos []models.FunctionInfo
+	for _, row := range funcsResults {
+		functionInfo := models.FunctionInfo{
+			Name:       row["ROUTINE_NAME"],
+			Type:       row["ROUTINE_TYPE"],
+			Definition: row["ROUTINE_DEFINITION"],
+		}
+		functionInfos = append(functionInfos, functionInfo)
+	}
+
+	// 获取触发器
+	triggersQuery := `SELECT TRIGGER_NAME, EVENT_MANIPULATION, EVENT_OBJECT_TABLE,
+	 ACTION_TIMING, ACTION_STATEMENT
+	 FROM information_schema.TRIGGERS
+	 WHERE TRIGGER_SCHEMA = ?`
+
+	triggersResults, err := m.Engine.QueryString(triggersQuery, database)
+	if err != nil {
+		return nil, fmt.Errorf("获取触发器失败: %v", err)
+	}
+
+	var triggerInfos []models.TriggerInfo
+	for _, row := range triggersResults {
+		triggerInfo := models.TriggerInfo{
+			Name:      row["TRIGGER_NAME"],
+			Event:     row["EVENT_MANIPULATION"],
+			Table:     row["EVENT_OBJECT_TABLE"],
+			Timing:    row["ACTION_TIMING"],
+			Statement: row["ACTION_STATEMENT"],
+		}
+		triggerInfos = append(triggerInfos, triggerInfo)
+	}
+
+	// 返回包含总大小、表数量和表信息的 DatabaseInfo 结构体
+	return &models.DatabaseInfo{
+		SizeMB:     sizeMB,
+		TableCount: len(tables),
+		Tables:     tableInfoMap,
+		LobTables:  lobTableInfoMap,
+		Functions:  functionInfos,
+		Triggers:   triggerInfos,
+	}, nil
+}
+
+// ReadTableFromDBWithPagination 使用 Xorm 分页从数据库中读取表数据
+// 参数:
+// - engine: Xorm 引擎实例,用于连接和操作数据库
+// - tableName: 要读取的表名
+// - offset: 分页查询的起始偏移量
+// - pageSize: 每次分页查询的行数
+// 返回值:
+// - headers: 表的列名列表
+// - records: 表的数据,包含每行的内容
+// - error: 如果查询出错,返回相关的错误信息
+func (m *MySQLExecutor) ReadTableFromDBWithPagination(tableName string, offset int, pageSize int) ([]string, [][]string, error) {
+	// 构建 SQL 查询,使用 LIMIT 和 OFFSET 来分页查询数据
+	sql := fmt.Sprintf("SELECT * FROM %s LIMIT %d OFFSET %d", tableName, pageSize, offset)
+	rows, err := m.Engine.QueryString(sql)
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to query table: %v", err)
+	}
+
+	// 如果没有查询到数据,则返回 nil
+	if len(rows) == 0 {
+		return nil, nil, nil
+	}
+
+	// 获取列名
+	headers := make([]string, 0)
+	for key := range rows[0] {
+		headers = append(headers, key)
+	}
+
+	// 获取数据行
+	records := make([][]string, 0)
+	for _, row := range rows {
+		record := make([]string, len(headers))
+		for i, header := range headers {
+			record[i] = row[header]
+		}
+		records = append(records, record)
+	}
+
+	return headers, records, nil
+}
+
+// InsertRecordsToDB 使用 Xorm 的 Session 结合 Prepare 语句将记录插入到数据库中
+// 参数:
+// - engine: Xorm 引擎实例,用于连接和操作数据库
+// - tableName: 插入的目标表名
+// - headers: 表头,用于生成插入语句中的列名
+// - records: 要插入的数据记录,每行对应表的一条记录
+// 返回值:
+// - error: 如果插入过程中有错误,返回相关的错误信息
+func (m *MySQLExecutor) InsertRecordsToDB(tableName string, headers []string, records [][]string) error {
+	if len(records) == 0 {
+		return nil // 如果没有记录,不进行任何插入操作
+	}
+
+	// 获取 Xorm 底层的 *sql.DB 实例
+	db := m.Engine.DB().DB
+
+	// 构建插入 SQL 语句
+	columnNames := strings.Join(headers, ", ")
+	placeholders := strings.Repeat("?, ", len(headers))
+	placeholders = strings.TrimSuffix(placeholders, ", ")
+	insertSQL := fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s)", tableName, columnNames, placeholders)
+
+	// 准备插入语句
+	stmt, err := db.Prepare(insertSQL)
+	if err != nil {
+		return fmt.Errorf("failed to prepare insert statement: %v", err)
+	}
+	defer stmt.Close()
+
+	// 插入记录
+	for _, record := range records {
+		args := make([]interface{}, len(record))
+		for i, v := range record {
+			args[i] = v
+		}
+
+		// 使用准备好的语句执行插入
+		_, err := stmt.Exec(args...)
+		if err != nil {
+			return fmt.Errorf("failed to execute insert statement: %v", err)
+		}
+	}
+
+	return nil
+}

+ 3 - 0
internal/db_executor/xugu_executor.go

@@ -0,0 +1,3 @@
+package db_executor
+
+

+ 31 - 0
internal/global/global.go

@@ -0,0 +1,31 @@
+package global
+
+import (
+	"fmt"
+	"os"
+	"xg_fetl/internal/config"
+)
+
+var Cfg *config.Config
+
// init loads the application configuration into the package-level Cfg.
// NOTE(review): panicking here aborts the binary before main runs whenever
// the config cannot be loaded — confirm that is the desired behavior.
func init() { // initialize package-level state
	var err error
	Cfg, err = config.LoadConfig()
	if err != nil {
		panic(err)
	}
}
+
+func InitAll() {
+	//创建输出文件夹
+	folderName := "GTool"
+	_, err := os.Stat(folderName)
+	if os.IsNotExist(err) {
+		err := os.Mkdir(folderName, 0755)
+		if err != nil {
+			fmt.Println("创建文件夹失败:", err)
+			return
+		}
+		fmt.Println("文件夹创建成功。")
+	}
+}

+ 40 - 0
internal/models/database.go

@@ -0,0 +1,40 @@
+package models
+
// DatabaseInfo holds a database's total size, table count, per-table info
// (split into regular and LOB tables), and its routines and triggers.
type DatabaseInfo struct {
	SizeMB     float64              // total size in MB
	TableCount int                  // number of tables
	Tables     map[string]TableInfo // regular tables, keyed by name
	LobTables  map[string]TableInfo // tables containing LOB columns, keyed by name
	Functions  []FunctionInfo       // stored functions and procedures
	Triggers   []TriggerInfo        // triggers
}
+
// TableInfo describes a single table: name, size (MB), creation DDL and
// row count.
type TableInfo struct {
	Name  string  // table name
	Size  float64 // size in MB
	DDL   string  // CREATE TABLE statement
	Count int     // row count
}
// FunctionInfo describes a stored routine.
type FunctionInfo struct {
	Name       string
	Type       string // "FUNCTION" or "PROCEDURE"
	Definition string
}
+
// TriggerInfo describes a database trigger.
type TriggerInfo struct {
	Name      string
	Event     string // "INSERT", "UPDATE", "DELETE", ...
	Table     string // table the trigger is attached to
	Timing    string // "BEFORE" or "AFTER"
	Statement string // trigger body
}
+
// DbConnParams bundles the parameters needed to open a database connection.
type DbConnParams struct {
	Host     string
	Port     string
	Database string
	User     string
	Password string
}

+ 15 - 0
internal/services/db_info_services.go

@@ -0,0 +1,15 @@
+package services
+
+import (
+	"xg_fetl/internal/db_executor"
+	"xg_fetl/internal/models"
+)
+
+func GetDBInfoService(dbName string, executor db_executor.DBExecutor) (*models.DatabaseInfo, error) {
+	dbInfo, err := executor.GetDatabaseInfo(dbName)
+	if err != nil {
+		return nil, err
+	}
+
+	return dbInfo, nil
+}

+ 285 - 0
internal/services/reader_services.go

@@ -0,0 +1,285 @@
+package services
+
+import (
+	"encoding/csv"
+	"fmt"
+	"os"
+	"path/filepath"
+	"runtime"
+	"sort"
+	"sync"
+	"time"
+
+	"regexp"
+	"xg_fetl/internal/db_executor"
+	"xg_fetl/internal/models"
+)
+
+// ReaderMain 主函数,参数化配置,使其更加灵活
+func ReaderMain(
+	dbName string, // 数据库名称
+	dirPath string, // CSV 文件所在的根目录
+	tableInfos map[string]models.TableInfo, // 要处理的表信息
+	delimiter rune, // CSV 文件的分隔符
+	pageSize int, // 分页大小,每次读取的记录数
+	mysqeExec db_executor.DBExecutor, // 数据库执行器接口
+) {
+	// 创建用于接收每个表处理结果的通道
+	resultChan := make(chan TableResult)
+	// 创建等待组,用于等待所有表的处理完成
+	var wg sync.WaitGroup
+
+	// 遍历每个表进行处理,使用协程
+	for _, tableName := range sortedTableNames(tableInfos) {
+		wg.Add(1)
+
+		go func(tableName string) {
+			defer wg.Done()
+
+			// 开始处理表,记录开始时间
+			startTime := time.Now()
+
+			// 初始化 TableResult
+			tableResult := TableResult{
+				TableName:      tableName,
+				Success:        false,
+				GoroutineCount: 1, // 只使用一个协程
+				Logs:           []string{},
+			}
+
+			// 构建表名对应的文件夹路径,例如 dirPath/tableName
+			tableDirPath := filepath.Join(dirPath, tableName)
+
+			// 检查表名对应的文件夹是否存在
+			if _, err := os.Stat(tableDirPath); os.IsNotExist(err) {
+				errMsg := fmt.Sprintf("表 %s 对应的目录不存在: %v", tableName, err)
+				tableResult.Error = fmt.Errorf(errMsg)
+				tableResult.Logs = append(tableResult.Logs, errMsg)
+				resultChan <- tableResult
+				return
+			}
+
+			// 编译匹配 CSV 文件名的正则表达式,格式为 表名_[序号].csv
+			filenamePattern := fmt.Sprintf(`^%s_\d+\.csv$`, regexp.QuoteMeta(tableName))
+			re, err := regexp.Compile(filenamePattern)
+			if err != nil {
+				errMsg := fmt.Sprintf("无法编译文件名正则表达式: %v", err)
+				tableResult.Error = fmt.Errorf(errMsg)
+				tableResult.Logs = append(tableResult.Logs, errMsg)
+				resultChan <- tableResult
+				return
+			}
+
+			// 获取表目录下的所有 CSV 文件路径
+			files, err := GetCSVFiles(tableDirPath)
+			if err != nil {
+				errMsg := fmt.Sprintf("无法获取 CSV 文件: %v", err)
+				tableResult.Error = fmt.Errorf(errMsg)
+				tableResult.Logs = append(tableResult.Logs, errMsg)
+				resultChan <- tableResult
+				return
+			}
+
+			// 过滤出符合命名规则的文件列表,并按文件名排序
+			var matchedFiles []string
+			for _, filePath := range files {
+				filename := filepath.Base(filePath)
+				if re.MatchString(filename) {
+					matchedFiles = append(matchedFiles, filePath)
+				}
+			}
+
+			if len(matchedFiles) == 0 {
+				errMsg := fmt.Sprintf("表 %s 没有找到符合条件的 CSV 文件", tableName)
+				tableResult.Error = fmt.Errorf(errMsg)
+				tableResult.Logs = append(tableResult.Logs, errMsg)
+				resultChan <- tableResult
+				return
+			}
+
+			// 按文件名排序,确保顺序一致
+			sort.Strings(matchedFiles)
+
+			// 估算可用内存,决定一次加载多少个文件
+			availableMemory := getAvailableMemory()
+			var batchFiles []string
+			var totalSize int64 = 0
+			fmt.Println("可用内存:", availableMemory)
+			for idx, filePath := range matchedFiles {
+				fileInfo, err := os.Stat(filePath)
+				if err != nil {
+					tableResult.ErrorCount++
+					logMsg := fmt.Sprintf("无法获取文件信息 %s: %v", filePath, err)
+					tableResult.Logs = append(tableResult.Logs, logMsg)
+					continue
+				}
+				fileSize := fileInfo.Size()
+
+				// 判断是否超过可用内存
+				if totalSize+fileSize > availableMemory && len(batchFiles) > 0 {
+					// 处理当前批次文件
+					fmt.Println("处理当前批次文件", batchFiles)
+					err = processCSVFiles(batchFiles, delimiter, mysqeExec, tableName, &tableResult)
+					if err != nil {
+						tableResult.ErrorCount++
+						logMsg := fmt.Sprintf("处理文件批次时出错: %v", err)
+						tableResult.Logs = append(tableResult.Logs, logMsg)
+					}
+					// 重置批次
+					batchFiles = []string{}
+					fmt.Println("重置批次")
+					totalSize = 0
+				}
+
+				batchFiles = append(batchFiles, filePath)
+				totalSize += fileSize
+
+				// 如果是最后一个文件,处理剩余的批次
+				if idx == len(matchedFiles)-1 && len(batchFiles) > 0 {
+					fmt.Println(",处理剩余的批次 ", batchFiles)
+					err = processCSVFiles(batchFiles, delimiter, mysqeExec, tableName, &tableResult)
+					if err != nil {
+						tableResult.ErrorCount++
+						logMsg := fmt.Sprintf("处理文件批次时出错: %v", err)
+						tableResult.Logs = append(tableResult.Logs, logMsg)
+					}
+				}
+			}
+
+			// 计算导出持续时间
+			tableResult.ExportDuration = time.Since(startTime)
+			tableResult.Success = tableResult.ErrorCount == 0
+
+			resultChan <- tableResult
+		}(tableName)
+	}
+
+	// 开启一个协程来收集结果并打印
+	go func() {
+		wg.Wait()
+		close(resultChan)
+	}()
+
+	// 打印结果
+	for result := range resultChan {
+		if result.Success {
+			fmt.Printf("表 %s 导入成功,耗时 %v,共插入 %d 行数据。\n", result.TableName, result.ExportDuration, result.TotalRows)
+		} else {
+			fmt.Printf("表 %s 导入完成,存在错误,错误数量 %d,耗时 %v。\n", result.TableName, result.ErrorCount, result.ExportDuration)
+		}
+		// 打印详细日志
+		for _, logMsg := range result.Logs {
+			fmt.Println(logMsg)
+		}
+	}
+}
+
// processCSVFiles imports one batch of CSV files into tableName, updating
// tableResult's counters and logs as it goes.
// NOTE(review): per-file failures are recorded in tableResult but the
// function itself always returns nil — confirm callers rely on that.
func processCSVFiles(
	files []string,
	delimiter rune,
	mysqeExec db_executor.DBExecutor,
	tableName string,
	tableResult *TableResult,
) error {
	for _, filePath := range files {
		logMsg := fmt.Sprintf("开始读入文件: %s", filePath)
		fmt.Println(logMsg)
		tableResult.Logs = append(tableResult.Logs, logMsg)

		// Load the whole CSV file into memory.
		headers, records, err := ReadEntireCSV(filePath, delimiter)
		if err != nil {
			tableResult.ErrorCount++
			logMsg = fmt.Sprintf("无法读取 CSV 文件 %s: %v", filePath, err)
			fmt.Println(logMsg)
			tableResult.Logs = append(tableResult.Logs, logMsg)
			continue
		}

		// Update total row count and the running average row size.
		tableResult.TotalRows += len(records)
		if len(records) > 0 {
			fileInfo, statErr := os.Stat(filePath)
			if statErr == nil && fileInfo.Size() > 0 {
				rowSize := fileInfo.Size() / int64(len(records))
				// Running average of the per-file averages.
				if tableResult.AverageRowSize == 0 {
					tableResult.AverageRowSize = rowSize
				} else {
					tableResult.AverageRowSize = (tableResult.AverageRowSize + rowSize) / 2
				}
			}
		}

		// Insert the rows into the database.
		err = mysqeExec.InsertRecordsToDB(tableName, headers, records)
		if err != nil {
			tableResult.ErrorCount++
			logMsg = fmt.Sprintf("无法将记录插入数据库: %v", err)
			fmt.Println(logMsg)
			tableResult.Logs = append(tableResult.Logs, logMsg)
			continue
		}

		logMsg = fmt.Sprintf("完成导入文件: %s,共插入 %d 行数据。", filePath, len(records))
		fmt.Println(logMsg)
		tableResult.Logs = append(tableResult.Logs, logMsg)
	}
	return nil
}
+
+// ReadEntireCSV 读取整个 CSV 文件到内存中
+func ReadEntireCSV(filePath string, delimiter rune) ([]string, [][]string, error) {
+	// 打开 CSV 文件
+	file, err := os.Open(filePath)
+	if err != nil {
+		return nil, nil, fmt.Errorf("无法打开文件: %v", err)
+	}
+	defer file.Close()
+
+	// 创建 CSV Reader,设置分隔符
+	reader := csv.NewReader(file)
+	reader.Comma = delimiter
+
+	// 读取所有数据
+	records, err := reader.ReadAll()
+	if err != nil {
+		return nil, nil, fmt.Errorf("无法读取 CSV 文件: %v", err)
+	}
+
+	if len(records) == 0 {
+		return nil, nil, fmt.Errorf("CSV 文件为空")
+	}
+
+	// 第一个记录是表头
+	headers := records[0]
+	dataRecords := records[1:] // 跳过表头
+
+	return headers, dataRecords, nil
+}
+
+// GetCSVFiles 获取指定目录下的所有 CSV 文件路径
+func GetCSVFiles(dirPath string) ([]string, error) {
+	var files []string
+	// 遍历目录,查找所有 .csv 文件
+	entries, err := os.ReadDir(dirPath)
+	if err != nil {
+		return nil, fmt.Errorf("无法读取目录 %s: %v", dirPath, err)
+	}
+	for _, entry := range entries {
+		if !entry.IsDir() && filepath.Ext(entry.Name()) == ".csv" {
+			files = append(files, filepath.Join(dirPath, entry.Name()))
+		}
+	}
+	return files, nil
+}
+
// getAvailableMemory returns a memory-budget estimate in bytes, computed as
// 80% of (runtime Sys - Alloc).
// NOTE(review): these are Go-runtime statistics, not OS-level free memory,
// so this can badly under/over-estimate real available RAM — confirm the
// batching heuristic in ReaderMain tolerates that.
func getAvailableMemory() int64 {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)
	// Keep a 20% safety margin on the estimate.
	availableMemory := int64(m.Sys - m.Alloc)
	return availableMemory * 80 / 100
}

+ 575 - 0
internal/services/writer_services.go

@@ -0,0 +1,575 @@
+package services
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"os"
+	"path/filepath"
+	"sort"
+	"sync"
+	"time"
+	"xg_fetl/internal/db_executor"
+	"xg_fetl/internal/models"
+	"xg_fetl/module/csv/csvwriter"
+)
+
+/*
+WriteMain 导出数据库表数据到 CSV 文件,基于分页和文件大小进行分片
+参数:
+*  executor: 数据库执行器,用于读取数据库表中的数据
+*  tableNames: 要导出的表名数组
+*  baseFilePath: 导出文件的基本路径,用于生成不同分片的文件名
+* delimiter: CSV 的分隔符
+*  maxFileSize: 单个 CSV 文件的最大大小
+*  pageSize: 每次分页读取的行数
+*/
+func WriteMain(
+	executor db_executor.DBExecutor,
+	tableNames map[string]models.TableInfo,
+	baseFilePath string,
+	delimiter rune,
+	maxFileSize int64,
+	pageSize int) error {
+
+	{
+		// 	--------------
+		// 	fmt.Println("--------tableName: 开始导出表数据--------", tableName)
+		// 	// 开始分页读取并导出数据
+		// 	offset := 0
+		// 	for {
+		// 		// 从数据库中分页读取数据
+		// 		headers, records, err := executor.ReadTableFromDBWithPagination(tableName, offset, pageSize)
+		// 		if err != nil {
+		// 			log.Fatalf("Failed to read table with pagination: %v", err)
+		// 			return err
+		// 		}
+		// 		fmt.Println("循环")
+		// 		// 如果没有更多数据,则结束循环
+		// 		if records == nil {
+		// 			break
+		// 		}
+
+		// 		fmt.Println("baseFilePath: ", baseFilePath)
+		// 		csvFilePath := fmt.Sprintf("%s/%s", baseFilePath, tableName)
+		// 		// 将数据写入 CSV 文件中
+		// 		err = csvwriter.WriteRecordsToCSVWithFileSizeLimit(csvFilePath, headers, records, delimiter, maxFileSize, &fileIndex, &currentFileSize, &currentFilePath)
+		// 		if err != nil {
+		// 			log.Fatalf("Failed to write records to CSV: %v", err)
+		// 			return err
+		// 		}
+
+		// 		// 增加偏移量,以获取下一页数据
+		// 		offset += pageSize
+		// 	}
+		// 	fmt.Println("--------tableName 完成--------", tableName)
+		// }
+		err := exportTableConcurrently(tableNames, baseFilePath, pageSize, delimiter, &executor, maxFileSize)
+		if err != nil {
+			fmt.Println("导出表数据失败:", err)
+			return err
+		}
+		fmt.Println("Table exported successfully with size limit and pagination")
+		//	return nil
+	}
+	return nil
+}
+
// TableResult aggregates the outcome of processing a single table.
type TableResult struct {
	TableName         string
	Success           bool
	Error             error
	GeneratedFiles    int
	ErrorCount        int           // number of errors recorded for this table
	GoroutineCount    int           // number of goroutines used for this table
	TotalRows         int           // total row count
	AverageRowSize    int64         // average row size in bytes
	ExportDuration    time.Duration // actual export duration
	EstimatedDuration time.Duration // estimated export duration
	TableSize         int64         // table size in bytes
	Logs              []string      // per-table log messages
}
+
+// 获取当前时间字符串
+func currentTime() string {
+	return time.Now().Format("2006-01-02 15:04:05")
+}
+
+// 对表名进行排序,返回排序后的表名列表
+func sortedTableNames(tableInfos map[string]models.TableInfo) []string {
+	tableNames := make([]string, 0, len(tableInfos))
+	for name := range tableInfos {
+		tableNames = append(tableNames, name)
+	}
+	sort.Strings(tableNames)
+	return tableNames
+}
+
// logMessage writes msg to stdout, to the file logger, and appends it to the
// result's log buffer.
// NOTE(review): appending to result.Logs is not goroutine-safe; callers that
// invoke this from multiple goroutines must synchronize externally.
func logMessage(result *TableResult, msg string, logger *log.Logger) {
	fmt.Print(msg)
	logger.Print(msg)
	result.Logs = append(result.Logs, msg)
}
+
// estimateDataTransferRate measures local disk throughput (bytes/second) by
// writing and reading back a 50 MB scratch file, returning the average of
// the write and read rates.
//
// Changes from the original: os.CreateTemp replaces the deprecated
// ioutil.TempFile, and the previously ignored Seek error is now checked.
func estimateDataTransferRate() (float64, error) {
	// Temporary scratch file, removed when done.
	tmpFile, err := os.CreateTemp("", "data_transfer_test")
	if err != nil {
		return 0, err
	}
	defer os.Remove(tmpFile.Name())
	defer tmpFile.Close()

	// 50 MB of zeroed test data.
	testDataSize := 50 * 1024 * 1024
	testData := make([]byte, testDataSize)

	// Timed write.
	startTime := time.Now()
	if _, err = tmpFile.Write(testData); err != nil {
		return 0, err
	}
	writeDuration := time.Since(startTime)
	writeRate := float64(testDataSize) / writeDuration.Seconds()

	// Rewind, then timed read.
	if _, err = tmpFile.Seek(0, io.SeekStart); err != nil {
		return 0, err
	}
	readBuffer := make([]byte, testDataSize)
	startTime = time.Now()
	if _, err = io.ReadFull(tmpFile, readBuffer); err != nil && err != io.EOF {
		return 0, err
	}
	readDuration := time.Since(startTime)
	readRate := float64(testDataSize) / readDuration.Seconds()

	// Average of the two rates.
	return (writeRate + readRate) / 2, nil
}
+
// writeLogFile writes an overview section followed by per-table detail logs
// to logFilePath, overwriting any previous content.
func writeLogFile(logFilePath string, resultsMap map[string]TableResult, tableInfos map[string]models.TableInfo) error {
	var logBuffer bytes.Buffer

	// Overview: successful tables first, then failed ones.
	logBuffer.WriteString("========== 导出结果总览 ==========\n")
	var successTables []string
	var failedTables []TableResult
	for _, result := range resultsMap {
		if result.Success {
			successTables = append(successTables, result.TableName)
		} else {
			failedTables = append(failedTables, result)
		}
	}
	logBuffer.WriteString(fmt.Sprintf("成功导出的表(%d 个):\n", len(successTables)))
	for _, tableName := range successTables {
		result := resultsMap[tableName]
		avgRowSizeKB := float64(result.AverageRowSize) / 1024.0
		tableSizeMB := float64(result.TableSize) / (1024.0 * 1024.0)
		exportTime := result.ExportDuration.Seconds()
		estimatedTime := result.EstimatedDuration.Seconds()
		logBuffer.WriteString(fmt.Sprintf("- %s\n", tableName))
		logBuffer.WriteString(fmt.Sprintf("  总行数:%d\n", result.TotalRows))
		logBuffer.WriteString(fmt.Sprintf("  平均行大小:%.2f KB\n", avgRowSizeKB))
		logBuffer.WriteString(fmt.Sprintf("  表的容量:%.2f MB\n", tableSizeMB))
		logBuffer.WriteString(fmt.Sprintf("  估算导出耗时:%.2f 秒\n", estimatedTime))
		logBuffer.WriteString(fmt.Sprintf("  实际导出耗时:%.2f 秒\n", exportTime))
		logBuffer.WriteString(fmt.Sprintf("  使用协程数:%d\n", result.GoroutineCount))
	}
	logBuffer.WriteString("\n")
	if len(failedTables) > 0 {
		logBuffer.WriteString(fmt.Sprintf("导出失败的表(%d 个):\n", len(failedTables)))
		for _, failedTable := range failedTables {
			logBuffer.WriteString(fmt.Sprintf("- %s\n", failedTable.TableName))
			logBuffer.WriteString(fmt.Sprintf("  错误次数:%d\n", failedTable.ErrorCount))
			logBuffer.WriteString(fmt.Sprintf("  使用协程数:%d\n", failedTable.GoroutineCount))
			logBuffer.WriteString(fmt.Sprintf("  错误信息:%v\n", failedTable.Error))
		}
	} else {
		logBuffer.WriteString("所有表均导出成功。\n")
	}
	logBuffer.WriteString("\n")
	logBuffer.WriteString("========== 详细日志信息 ==========\n")

	// Per-table details, in sorted table order for reproducible output.
	for _, tableName := range sortedTableNames(tableInfos) {
		logBuffer.WriteString(fmt.Sprintf("------------ 表 %s ------------\n", tableName))
		result, exists := resultsMap[tableName]
		if exists {
			for _, msg := range result.Logs {
				logBuffer.WriteString(msg)
			}
			logBuffer.WriteString("\n")
		} else {
			logBuffer.WriteString(fmt.Sprintf("表 %s 的处理结果不存在。\n", tableName))
		}
	}

	// Overwrite the log file with the assembled content.
	err := ioutil.WriteFile(logFilePath, logBuffer.Bytes(), 0644)
	if err != nil {
		return fmt.Errorf("无法写入日志文件: %v", err)
	}

	return nil
}
+
+/**
+ * exportTableConcurrently 并发地导出多个表的数据到 CSV 文件。
+ *
+ * 参数:
+ * - tableInfos: 包含表信息的映射,键为表名,值为 TableInfo。
+ * - baseFilePath: 基础文件路径,用于存储导出的 CSV 文件。
+ * - pageSize: 分页大小,决定每次从数据库读取的记录数量。
+ * - delimiter: CSV 文件的分隔符。
+ * - executor: 数据库执行器,用于执行数据库查询。
+ * - maxFileSizeMB: 单个 CSV 文件的最大大小(以 MB 为单位)。
+ *
+ * 返回值:
+ * - error: 如果发生错误,返回错误信息。
+ */
+func exportTableConcurrently(
+	tableInfos map[string]models.TableInfo,
+	baseFilePath string,
+	pageSize int,
+	delimiter rune,
+	executor *db_executor.DBExecutor,
+	maxFileSizeMB int64,
+) error {
+	var (
+		wg                        sync.WaitGroup
+		resultsMap                = make(map[string]TableResult)
+		resultsMutex              sync.Mutex // 用于保护 resultsMap 的并发访问
+		maxConcurrentTableExports = 3
+		tableSemaphore            = make(chan struct{}, maxConcurrentTableExports)
+		maxConcurrentFileExports  = 100
+	)
+
+	// 创建日志文件
+	logFilePath := filepath.Join(baseFilePath, "export.log")
+	logFile, err := os.OpenFile(logFilePath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
+	if err != nil {
+		return fmt.Errorf("无法打开日志文件: %v", err)
+	}
+	defer logFile.Close()
+
+	// 创建日志记录器,不添加任何前缀或标志
+	logger := log.New(logFile, "", 0)
+
+	// 估算数据传输速率
+	dataTransferRate, err := estimateDataTransferRate()
+	if err != nil {
+		msg := fmt.Sprintf("%s 无法估算数据传输速率,默认使用 50 MB/s。错误信息:%v\n", currentTime(), err)
+		fmt.Print(msg)
+		logger.Print(msg)
+		dataTransferRate = 50 * 1024 * 1024 // 默认 50 MB/s
+	} else {
+		msg := fmt.Sprintf("%s 估算的数据传输速率为:%.2f MB/s\n", currentTime(), dataTransferRate/(1024*1024))
+		fmt.Print(msg)
+		logger.Print(msg)
+	}
+
+	// 遍历每个表进行处理,使用协程
+	for _, tableName := range sortedTableNames(tableInfos) {
+		tableInfo := tableInfos[tableName]
+		wg.Add(1)
+		go func(tableInfo models.TableInfo) {
+			tableSemaphore <- struct{}{} // 获取表级信号量
+			defer func() {
+				<-tableSemaphore // 释放表级信号量
+				wg.Done()
+			}()
+
+			tableName := tableInfo.Name
+			totalRecords := tableInfo.Count
+
+			// 创建一个 TableResult 来保存该表的结果和日志
+			result := TableResult{
+				TableName:      tableName,
+				TotalRows:      totalRecords,
+				GoroutineCount: 1, // 包含当前协程
+			}
+
+			// 创建存储当前表的文件夹
+			tableDir := filepath.Join(baseFilePath, tableName)
+			if err := os.MkdirAll(tableDir, 0755); err != nil {
+				msg := fmt.Sprintf("%s 创建表 %s 文件夹失败: %v\n", currentTime(), tableName, err)
+				logMessage(&result, msg, logger)
+
+				result.Success = false
+				result.Error = err
+				result.ErrorCount = 1
+
+				resultsMutex.Lock()
+				resultsMap[tableName] = result
+				resultsMutex.Unlock()
+				return
+			}
+
+			// 估算每条记录的平均大小
+			averageRecordSize, err := estimateAverageRecordSize(*executor, tableName)
+			if err != nil {
+				msg := fmt.Sprintf("%s 无法估算表 %s 平均记录大小: %v\n", currentTime(), tableName, err)
+				logMessage(&result, msg, logger)
+
+				result.Success = false
+				result.Error = err
+				result.ErrorCount = 1
+
+				resultsMutex.Lock()
+				resultsMap[tableName] = result
+				resultsMutex.Unlock()
+				return
+			}
+			result.AverageRowSize = averageRecordSize
+
+			msg := fmt.Sprintf("%s 表 %s 的平均记录大小: %d 字节\n", currentTime(), tableName, averageRecordSize)
+			logMessage(&result, msg, logger)
+
+			// 计算表的容量
+			tableSize := averageRecordSize * int64(totalRecords)
+			result.TableSize = tableSize
+			tableSizeMB := float64(tableSize) / (1024.0 * 1024.0)
+			msg = fmt.Sprintf("%s 表 %s 的容量约为:%.2f MB\n", currentTime(), tableName, tableSizeMB)
+			logMessage(&result, msg, logger)
+
+			// 估算导出时间
+			estimatedDurationSeconds := float64(tableSize) / dataTransferRate
+			estimatedDuration := time.Duration(estimatedDurationSeconds * float64(time.Second))
+			result.EstimatedDuration = estimatedDuration
+
+			msg = fmt.Sprintf("%s 表 %s 估算导出时间:%.2f 秒\n", currentTime(), tableName, estimatedDurationSeconds)
+			logMessage(&result, msg, logger)
+
+			// 计算每个 CSV 文件应包含的记录数
+			maxFileSizeBytes := maxFileSizeMB * 1024 * 1024 // 将 MB 转换为字节
+			recordsPerFile := int(maxFileSizeBytes / averageRecordSize)
+			if recordsPerFile <= 0 {
+				recordsPerFile = 1000 // 设置一个最小值,避免 recordsPerFile 为 0
+			}
+			msg = fmt.Sprintf("%s 表 %s 的每个文件应包含的记录数: %d\n", currentTime(), tableName, recordsPerFile)
+			logMessage(&result, msg, logger)
+
+			// 计算需要的 CSV 文件数量
+			totalFiles := (totalRecords + recordsPerFile - 1) / recordsPerFile
+			msg = fmt.Sprintf("%s 表 %s 需要的 CSV 文件数量: %d\n", currentTime(), tableName, totalFiles)
+			logMessage(&result, msg, logger)
+
+			// 记录导出开始时间
+			startTime := time.Now()
+
+			var fileWg sync.WaitGroup
+			fileSemaphore := make(chan struct{}, maxConcurrentFileExports)
+			maxRetries := 3
+
+			// 错误计数器
+			var errorCount int32
+
+			// 为每个 CSV 文件启动一个协程
+			for fileIndex := 0; fileIndex < totalFiles; fileIndex++ {
+				fileWg.Add(1)
+				result.GoroutineCount++ // 增加协程计数器
+
+				go func(fileIndex int) {
+					fileSemaphore <- struct{}{} // 获取文件级信号量
+					defer func() {
+						<-fileSemaphore // 释放文件级信号量
+						fileWg.Done()
+					}()
+
+					offset := fileIndex * recordsPerFile
+					limit := recordsPerFile
+					if offset+limit > totalRecords {
+						limit = totalRecords - offset
+					}
+
+					csvFilePath := filepath.Join(tableDir, fmt.Sprintf("%s_%d.csv", tableName, fileIndex+1))
+
+					attempt := 0
+					for {
+						attempt++
+						err := exportDataRangeToCSV(*executor, tableName, csvFilePath, delimiter, offset, limit, pageSize)
+						if err != nil {
+							if attempt < maxRetries {
+								msg := fmt.Sprintf("%s 表 %s 导出到文件 %s 时发生错误 (第 %d 次尝试): %v,重试...\n", currentTime(), tableName, csvFilePath, attempt, err)
+								logMessage(&result, msg, logger)
+								time.Sleep(2 * time.Second)
+								continue
+							} else {
+								msg := fmt.Sprintf("%s 表 %s 导出到文件 %s 时发生错误 (第 %d 次尝试): %v,已达最大重试次数。\n", currentTime(), tableName, csvFilePath, attempt, err)
+								logMessage(&result, msg, logger)
+								// 增加错误计数器
+								errorCount++
+								break
+							}
+						} else {
+							msg := fmt.Sprintf("%s 表 %s 的文件 %s 导出完成。\n", currentTime(), tableName, filepath.Base(csvFilePath))
+							logMessage(&result, msg, logger)
+							break
+						}
+					}
+				}(fileIndex)
+			}
+
+			// 等待当前表的所有文件导出完成
+			fileWg.Wait()
+
+			// 计算实际导出耗时
+			exportDuration := time.Since(startTime)
+			result.ExportDuration = exportDuration
+
+			msg = fmt.Sprintf("%s 表 %s 导出完成,共生成 %d 个文件,使用了 %d 个协程。\n", currentTime(), tableName, totalFiles, result.GoroutineCount)
+			logMessage(&result, msg, logger)
+
+			success := errorCount == 0
+			var finalError error
+			if !success {
+				finalError = fmt.Errorf("表 %s 导出过程中发生 %d 个错误", tableName, errorCount)
+			}
+
+			result.Success = success
+			result.Error = finalError
+			result.ErrorCount = int(errorCount)
+			result.GeneratedFiles = totalFiles
+
+			resultsMutex.Lock()
+			resultsMap[tableName] = result
+			resultsMutex.Unlock()
+		}(tableInfo)
+	}
+
+	// 等待所有表处理完成
+	wg.Wait()
+
+	// 写入日志文件
+	err = writeLogFile(logFilePath, resultsMap, tableInfos)
+	if err != nil {
+		return fmt.Errorf("无法写入日志文件: %v", err)
+	}
+
+	return nil
+}
+
+/**
+ * estimateAverageRecordSize 估算指定表中每条记录的平均大小(以字节为单位)。
+ *
+ * 参数:
+ * - executor: 数据库执行器,用于执行数据库查询。
+ * - tableName: 需要估算的表名。
+ *
+ * 返回值:
+ * - int64: 平均每条记录的大小(字节)。
+ * - error: 如果发生错误,返回错误信息。
+ */
+func estimateAverageRecordSize(executor db_executor.DBExecutor, tableName string) (int64, error) {
+	// 随机抽取一定数量的记录,计算平均大小
+	sampleSize := 100 // 抽样的记录数
+	offset := 0       // 从第 0 条记录开始抽样
+
+	// 使用 ReadTableFromDBWithPagination 函数获取样本数据
+	_, records, err := executor.ReadTableFromDBWithPagination(tableName, offset, sampleSize)
+	if err != nil {
+		return 0, fmt.Errorf("failed to query table: %v", err)
+	}
+
+	// 检查是否获取到了数据
+	if records == nil || len(records) == 0 {
+		return 0, fmt.Errorf("没有获取到抽样记录,表数量可能为 0")
+	}
+
+	var totalSize int64 = 0 // 总记录大小
+	var count int64 = 0     // 记录数
+
+	// 遍历每一条记录,计算其大小
+	for _, record := range records {
+		var recordSize int64 = 0
+		for _, val := range record {
+			if val != "" {
+				recordSize += int64(len(val)) // 累加字段值的长度
+			}
+		}
+		totalSize += recordSize // 累加每条记录的大小
+		count++
+	}
+
+	if count == 0 {
+		return 0, fmt.Errorf("没有获取到抽样记录,表数量可能为 0")
+	}
+
+	averageSize := totalSize / count // 计算平均记录大小
+	return averageSize, nil
+}
+
// exportDataRangeToCSV exports rows [offset, offset+limit) of tableName into
// a single CSV file, reading from the database in pages of pageSize rows.
// Parameters:
//   - executor: database executor
//   - tableName: table to read
//   - csvFilePath: destination CSV file
//   - delimiter: CSV field delimiter
//   - offset: starting row offset
//   - limit: number of rows to export
//   - pageSize: rows per database read
// Returns an error if any read or write fails.
// NOTE(review): the header row captured on the first non-empty page is
// reused for all pages; that is only safe if
// ReadTableFromDBWithPagination returns columns in a stable order across
// calls — verify that guarantee.
func exportDataRangeToCSV(
	executor db_executor.DBExecutor,
	tableName string,
	csvFilePath string,
	delimiter rune,
	offset int,
	limit int,
	pageSize int,
) error {
	// Headers are captured from the first non-empty page.
	var headers []string
	var headersWritten bool = false

	// Number of pages needed (ceiling division).
	totalPages := (limit + pageSize - 1) / pageSize

	for page := 0; page < totalPages; page++ {
		pageOffset := offset + page*pageSize
		pageLimit := pageSize
		if pageOffset+pageLimit > offset+limit {
			pageLimit = (offset + limit) - pageOffset
		}

		// Read one page from the database.
		currentHeaders, records, err := executor.ReadTableFromDBWithPagination(tableName, pageOffset, pageLimit)
		if err != nil {
			return fmt.Errorf("failed to read data from table %s: %v", tableName, err)
		}

		// Remember headers the first time we see them.
		if !headersWritten && currentHeaders != nil {
			headers = currentHeaders
			headersWritten = true
		}

		// First page creates the file; subsequent pages append.
		appendToFile := page > 0

		// Write this page to the CSV file.
		err = csvwriter.WriteRecordsToCSV(csvFilePath, headers, records, delimiter, appendToFile)
		if err != nil {
			return fmt.Errorf("failed to write data to CSV: %v", err)
		}

		fmt.Printf("表 %s 文件 %s 已写入 %d/%d 页。\n", tableName, filepath.Base(csvFilePath), page+1, totalPages)
	}

	return nil
}

+ 32 - 0
internal/utils/utils.go

@@ -0,0 +1,32 @@
+package utils
+
+import (
+	"fmt"
+	"os"
+)
+
// CreateDirectory creates the directory at path with permissions 0755.
// It returns an error if the path already exists (file or directory) or
// if the directory cannot be created.
func CreateDirectory(path string) error {
	// Create directly and classify the error instead of the previous
	// stat-then-create sequence, which was racy: the directory could be
	// created by someone else between the os.Stat and the os.Mkdir.
	err := os.Mkdir(path, 0755) // 0755: rwxr-xr-x
	if err == nil {
		return nil
	}
	if os.IsExist(err) {
		// Same "already exists" error message as before.
		return fmt.Errorf("目录 %s 已存在", path)
	}
	return fmt.Errorf("创建目录 %s 失败: %v", path, err)
}
+
+

+ 21 - 0
main.go

@@ -0,0 +1,21 @@
+package main
+
import (
	"fmt"
	"os"

	"xg_fetl/cmd"
	"xg_fetl/internal/config"
	"xg_fetl/internal/global"

	_ "github.com/go-sql-driver/mysql" // 使用 MySQL 驱动
)
+
+func main() {
+	global.InitAll()
+	cfg, err := config.LoadConfig()
+	if err != nil {
+		fmt.Println("load config error", err)
+	}
+	fmt.Println(cfg)
+
+	cmd.Execute()
+}

+ 75 - 0
module/csv/csvreader/csvreader.go

@@ -0,0 +1,75 @@
+package csvreader
+
+import (
+	"encoding/csv"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+)
+
// GetCSVFiles returns the paths of all non-directory entries with a ".csv"
// extension under dirPath, searched recursively.
func GetCSVFiles(dirPath string) ([]string, error) {
	var files []string
	// WalkDir (Go 1.16+) avoids the per-entry os.Stat that filepath.Walk
	// performs, which matters on large export directories.
	err := filepath.WalkDir(dirPath, func(path string, d os.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if !d.IsDir() && filepath.Ext(path) == ".csv" {
			files = append(files, path)
		}
		return nil
	})
	if err != nil {
		return nil, fmt.Errorf("failed to list CSV files in directory: %v", err)
	}
	return files, nil
}
+
// ReadCSVWithLimit reads up to limit data records from the CSV file at
// filePath, always consuming the header row first.
//
// NOTE(review): offset behaves as a 1-based data-row index — offset 0 and
// offset 1 both start at the first data row, and offset N skips N-1 data
// rows. Confirm this matches the callers' paging expectations before
// changing it.
//
// Returns the header row, the records read (possibly fewer than limit when
// the file ends early), and any error encountered.
func ReadCSVWithLimit(filePath string, delimiter rune, offset int, limit int) ([]string, [][]string, error) {
	file, err := os.Open(filePath)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to open file: %v", err)
	}
	defer file.Close()

	reader := csv.NewReader(file)
	reader.Comma = delimiter

	// The first row is always the header.
	headers, err := reader.Read()
	if err != nil {
		return nil, nil, fmt.Errorf("failed to read headers from CSV: %v", err)
	}

	// Normalize offset 0 to 1 so both mean "start at the first data row".
	if offset == 0 {
		offset++
	}

	// Skip the offset-1 data rows that precede the requested window.
	for i := 1; i < offset; i++ {
		if _, err := reader.Read(); err == io.EOF {
			// Fix: the window starts past EOF. Return the headers that were
			// already read instead of discarding them (previously returned
			// nil headers here).
			return headers, nil, nil
		} else if err != nil {
			return nil, nil, fmt.Errorf("failed to skip rows in CSV: %v", err)
		}
	}

	// Read at most `limit` records.
	records := make([][]string, 0, limit)
	for i := 0; i < limit; i++ {
		record, err := reader.Read()
		if err == io.EOF {
			break // end of file: return what we have
		} else if err != nil {
			return nil, nil, fmt.Errorf("failed to read row from CSV: %v", err)
		}
		records = append(records, record)
	}

	return headers, records, nil
}

+ 210 - 0
module/csv/csvwriter/csvwriter.go

@@ -0,0 +1,210 @@
+package csvwriter
+
+import (
+	"encoding/csv"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strconv"
+)
+
// WriteRecordsToCSV writes records to a CSV file.
// Parameters:
//   - filePath: CSV file path
//   - headers: header fields; written only when creating a new file and non-nil
//   - records: rows to write, each a string slice
//   - delimiter: CSV field delimiter
//   - appendToFile: true to append at the end of the file, false to create it
//
// Returns an error if the file cannot be opened/created or a write fails.
func WriteRecordsToCSV(filePath string, headers []string, records [][]string, delimiter rune, appendToFile bool) error {
	var file *os.File
	var err error

	if appendToFile {
		// Append mode. O_CREATE is a robustness fix: appending to a file
		// that does not exist yet now creates it instead of failing.
		file, err = os.OpenFile(filePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
		if err != nil {
			return fmt.Errorf("failed to open file for appending: %v", err)
		}
	} else {
		// Create (or truncate) a new file.
		file, err = os.Create(filePath)
		if err != nil {
			return fmt.Errorf("failed to create file: %v", err)
		}
	}
	defer file.Close()

	writer := csv.NewWriter(file)
	writer.Comma = delimiter
	defer writer.Flush()

	// Only a freshly created file gets a header row.
	if !appendToFile && headers != nil {
		if err := writer.Write(headers); err != nil {
			return fmt.Errorf("failed to write headers to CSV: %v", err)
		}
	}

	// WriteAll flushes on success, so records (and the buffered header)
	// reach the file before Close.
	if err := writer.WriteAll(records); err != nil {
		return fmt.Errorf("failed to write records to CSV: %v", err)
	}

	return nil
}
+
+/*
+WriteRecordsToCSVWithFileSizeLimit 将数据写入 CSV 文件,并基于文件大小进行分片
+参数:
+- baseFilePath: CSV 文件的基本路径,用于生成不同分片的文件名
+- headers: 表的列名,用于 CSV 表头
+- records: 要写入的表记录
+- delimiter: CSV 的分隔符
+- maxFileSizeMB: 单个 CSV 文件的最大大小 MB
+- fileIndex: 当前文件的索引号,用于管理分片文件的命名
+- currentFileSize: 当前文件的大小,用于判断是否需要创建新文件
+- currentFilePath: 当前正在写入的文件路径,用于管理文件的创建和打开
+返回值:
+- error: 如果写入过程中有错误,返回相关的错误信息
+*/
+func WriteRecordsToCSVWithFileSizeLimit(baseFilePath string, headers []string, records [][]string, delimiter rune, maxFileSizeMB int64, fileIndex *int, currentFileSize *int64, currentFilePath *string) error {
+	var file *os.File
+	var writer *csv.Writer
+
+	// 将 maxFileSize 从 MB 转换为字节
+	maxFileSize := maxFileSizeMB * 1024 * 1024
+
+	// 检查是否需要创建新文件
+	if *currentFileSize == 0 {
+		// 如果是第一次写入,创建新文件
+		*currentFilePath = filepath.Join(baseFilePath + "_" + strconv.Itoa(*fileIndex) + ".csv")
+		var err error
+		file, err = os.Create(*currentFilePath)
+		if err != nil {
+			return fmt.Errorf("failed to create file: %v", err)
+		}
+		defer file.Close()
+
+		writer = csv.NewWriter(file)
+		writer.Comma = delimiter // 设置 CSV 分隔符
+		defer writer.Flush()
+
+		// 写入表头到 CSV 文件
+		if err := writer.Write(headers); err != nil {
+			return fmt.Errorf("failed to write header to CSV: %v", err)
+		}
+		*currentFileSize += estimateRecordSize(headers, delimiter)
+	} else {
+		// 如果文件已存在,追加写入
+		var err error
+		file, err = os.OpenFile(*currentFilePath, os.O_APPEND|os.O_WRONLY, os.ModeAppend)
+		if err != nil {
+			return fmt.Errorf("failed to open file: %v", err)
+		}
+		defer file.Close()
+
+		writer = csv.NewWriter(file)
+		writer.Comma = delimiter // 设置 CSV 分隔符
+		defer writer.Flush()
+	}
+
+	// 遍历记录逐行写入 CSV 文件
+	for _, record := range records {
+		// 如果文件大小超过限制,创建新文件
+		if *currentFileSize > maxFileSize {
+			writer.Flush()
+			file.Close()
+
+			// 增加文件索引,创建新的文件
+			*fileIndex++
+			*currentFilePath = filepath.Join(baseFilePath + "_" + strconv.Itoa(*fileIndex) + ".csv")
+			var err error
+			file, err = os.Create(*currentFilePath)
+			if err != nil {
+				return fmt.Errorf("failed to create file: %v", err)
+			}
+
+			writer = csv.NewWriter(file)
+			writer.Comma = delimiter // 设置 CSV 分隔符
+			defer writer.Flush()
+
+			// 写入表头到新文件
+			if err := writer.Write(headers); err != nil {
+				return fmt.Errorf("failed to write header to new CSV: %v", err)
+			}
+			*currentFileSize = estimateRecordSize(headers, delimiter)
+		}
+
+		// 写入当前记录
+		if err := writer.Write(record); err != nil {
+			return fmt.Errorf("failed to write row to CSV: %v", err)
+		}
+
+		// 更新当前文件大小
+		*currentFileSize += estimateRecordSize(record, delimiter)
+	}
+
+	return nil
+}
+
// WriteRecordsAppendToCSVFile writes records to a CSV file, writing the
// header row only when a new file is created (appendToFile == false).
//
// Fix: the previous version used two csv.Writers on the same file in the
// create path; the header writer's deferred Flush ran after file.Close()
// (defers run LIFO), so the header was silently lost. A single writer now
// emits the header and the records in order.
func WriteRecordsAppendToCSVFile(filePath string, headers []string, records [][]string, delimiter rune, appendToFile bool) error {
	var file *os.File
	var err error
	writeHeader := false

	if appendToFile {
		// Append to an existing file; no header is written.
		file, err = os.OpenFile(filePath, os.O_APPEND|os.O_WRONLY, os.ModeAppend)
		if err != nil {
			return fmt.Errorf("failed to open file for appending: %v", err)
		}
	} else {
		// Create a new file; the header goes in first.
		file, err = os.Create(filePath)
		if err != nil {
			return fmt.Errorf("failed to create file: %v", err)
		}
		writeHeader = true
	}
	defer file.Close()

	writer := csv.NewWriter(file)
	writer.Comma = delimiter
	defer writer.Flush()

	if writeHeader {
		if err := writer.Write(headers); err != nil {
			return fmt.Errorf("failed to write header to CSV: %v", err)
		}
	}

	// WriteAll flushes on success, so header + records land before Close.
	if err := writer.WriteAll(records); err != nil {
		return fmt.Errorf("failed to write records to CSV: %v", err)
	}

	return nil
}
+
// estimateRecordSize returns an approximate on-disk size, in bytes, of one
// CSV record: the byte lengths of all fields, plus one byte per delimiter
// between adjacent fields, plus one byte for the trailing newline.
// Note: the delimiter is assumed to occupy one byte, and quoting/escaping
// overhead added by the CSV writer is not counted — this is an estimate.
// Parameters:
//   - record: the CSV record to estimate
//   - delimiter: the CSV field delimiter
func estimateRecordSize(record []string, delimiter rune) int64 {
	var total int64 = 1 // account for the line's newline
	for idx, field := range record {
		if idx > 0 {
			total++ // one delimiter precedes every field after the first
		}
		total += int64(len(field))
	}
	return total
}