diff --git a/docs/en/connector-v2/sink/SQLServer.md b/docs/en/connector-v2/sink/SQLServer.md
new file mode 100644
index 00000000000..d5cf8e4b173
--- /dev/null
+++ b/docs/en/connector-v2/sink/SQLServer.md
@@ -0,0 +1,159 @@
+# SQL Server
+
+> JDBC SQL Server Sink Connector
+
+## Support those engines
+
+> Spark
+> Flink
+> SeaTunnel Zeta
+
+## Key features
+
+- [x] [exactly-once](../../concept/connector-v2-features.md)
+- [x] [cdc](../../concept/connector-v2-features.md)
+
+> Use `Xa transactions` to ensure `exactly-once`, so `exactly-once` is only supported for databases that
+> support `Xa transactions`. You can set `is_exactly_once=true` to enable it.
+
+## Description
+
+Write data through jdbc. Support Batch mode and Streaming mode, support concurrent writing, support exactly-once
+semantics (using XA transaction guarantee).
+
+## Supported DataSource Info
+
+| datasource | supported versions | driver | url | maven |
+|------------|----------------------------------------------------------|----------------------------------------------|---------------------------------|-----------------------------------------------------------------------------------|
+| SQL Server | Different dependency version has different driver class. | com.microsoft.sqlserver.jdbc.SQLServerDriver | jdbc:sqlserver://localhost:1433 | [Download](https://mvnrepository.com/artifact/com.microsoft.sqlserver/mssql-jdbc) |
+
+## Database dependency
+
+> Please download the support list corresponding to 'Maven' and copy it to the '$SEATUNNEL_HOME/plugins/jdbc/lib/' working directory
+> For example SQL Server datasource: cp mssql-jdbc-xxx.jar $SEATUNNEL_HOME/plugins/jdbc/lib/
+
+## Data Type Mapping
+
+| SQLserver Data type | Seatunnel Data type |
+|-----------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------|
+| BIT | BOOLEAN |
+| TINYINT
SMALLINT | SHORT |
+| INTEGER | INT |
+| BIGINT | LONG |
+| DECIMAL
NUMERIC
MONEY
SMALLMONEY | DECIMAL((Get the designated column's specified column size)+1,
(Gets the designated column's number of digits to right of the
decimal point.))) |
+| REAL | FLOAT |
+| FLOAT | DOUBLE |
+| CHAR
NCHAR
VARCHAR
NTEXT
NVARCHAR
TEXT | STRING |
+| DATE | LOCAL_DATE |
+| TIME | LOCAL_TIME |
+| DATETIME
DATETIME2
SMALLDATETIME
DATETIMEOFFSET | LOCAL_DATE_TIME |
+| TIMESTAMP
BINARY
VARBINARY
IMAGE
UNKNOWN | Not supported yet |
+
+## Options
+
+| name | type | required | default | Description |
+|------------------------------|--------|----------|-----------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| url | String | Yes | - | The URL of the JDBC connection. Refer to a case: jdbc:sqlserver://localhost:1433 |
+| driver | String | Yes | - | The jdbc class name used to connect to the remote data source,
if you use SQLserver the value is `com.microsoft.sqlserver.jdbc.SQLServerDriver`. |
+| user | String | No | - | Connection instance user name |
+| password | String | No | - | Connection instance password |
+| query | String | Yes | - | Query statement |
+| connection_check_timeout_sec | Int | No | 30 | The time in seconds to wait for the database operation used to validate the connection to complete |
+| partition_column | String | No | - | The column name for parallelism's partition, only support numeric type. |
+| partition_lower_bound | Long | No | - | The partition_column min value for scan, if not set SeaTunnel will query database get min value. |
+| partition_upper_bound | Long | No | - | The partition_column max value for scan, if not set SeaTunnel will query database get max value. |
+| partition_num | Int | No | job parallelism | The number of partition count, only support positive integer. default value is job parallelism |
+| fetch_size                   | Int    | No       | 0               | For queries that return a large number of objects, you can configure
the row fetch size used in the query to improve performance by
reducing the number of database hits required to satisfy the selection criteria.
Zero means use jdbc default value.                                                                                                                               |
+| common-options | | No | - | Source plugin common parameters, please refer to [Source Common Options](common-options.md) for details |
+
+## tips
+
+> If partition_column is not set, it will run in single concurrency, and if partition_column is set, it will be executed in parallel according to the concurrency of tasks.
+
+## Task Example
+
+### simple:
+
+> This example defines a SeaTunnel synchronization task that automatically generates data through FakeSource and sends it to JDBC Sink. FakeSource generates a total of 16 rows of data (row.num=16), with each row having two fields, name (string type) and age (int type). The final target table, test_table, will also contain 16 rows of data. Before running this job, you need to create database test and table test_table in your SQL Server. And if you have not yet installed and deployed SeaTunnel, you need to follow the instructions in [Install SeaTunnel](../../start-v2/locally/deployment.md) to install and deploy SeaTunnel. And then follow the instructions in [Quick Start With SeaTunnel Engine](../../start-v2/locally/quick-start-seatunnel-engine.md) to run this job.
+
+```
+# Defining the runtime environment
+env {
+ # You can set flink configuration here
+ execution.parallelism = 1
+ job.mode = "BATCH"
+}
+
+source {
+ # This is a example source plugin **only for test and demonstrate the feature source plugin**
+ FakeSource {
+ parallelism = 1
+ result_table_name = "fake"
+ row.num = 16
+ schema = {
+ fields {
+ name = "string"
+ age = "int"
+ }
+ }
+ }
+ # If you would like to get more information about how to configure seatunnel and see full list of source plugins,
+ # please go to https://seatunnel.apache.org/docs/category/source-v2
+}
+
+transform {
+ # If you would like to get more information about how to configure seatunnel and see full list of transform plugins,
+ # please go to https://seatunnel.apache.org/docs/category/transform-v2
+}
+
+sink {
+ jdbc {
+ url = "jdbc:sqlserver://localhost:1433;databaseName=mydatabase"
+ driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
+ user = "root"
+ password = "123456"
+ query = "insert into test_table(name,age) values(?,?)"
+ }
+ # If you would like to get more information about how to configure seatunnel and see full list of sink plugins,
+ # please go to https://seatunnel.apache.org/docs/category/sink-v2
+}
+```
+
+### Exactly-once :
+
+> For accurate write scenarios, we guarantee exactly-once semantics.
+
+```
+jdbc {
+ url = "jdbc:sqlserver://localhost:1433;databaseName=testdb"
+ driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
+
+ max_retries = 0
+ user = "root"
+ password = "123456"
+ query = "insert into test_table(name,age) values(?,?)"
+
+ is_exactly_once = "true"
+
+    xa_data_source_class_name = "com.microsoft.sqlserver.jdbc.SQLServerXADataSource"
+}
+```
+
+### CDC(Change data capture) event
+
+> CDC change data is also supported by us. In this case, you need to configure database, table and primary_keys.
+
+```
+jdbc {
+ url = "jdbc:sqlserver://localhost:1433;databaseName=testdb"
+ driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
+ user = "root"
+ password = "123456"
+
+    generate_sink_sql = true
+    # You need to configure both database and table
+    database = test
+    table = sink_table
+    primary_keys = ["id","name"]
+}
+```
+
diff --git a/docs/en/connector-v2/sink/redshift.md b/docs/en/connector-v2/sink/redshift.md
new file mode 100644
index 00000000000..8d486dfcf80
--- /dev/null
+++ b/docs/en/connector-v2/sink/redshift.md
@@ -0,0 +1,159 @@
+# Redshift
+
+> JDBC Redshift sink Connector
+
+## Support those engines
+
+> Spark
+> Flink
+> SeaTunnel Zeta
+
+## Key features
+
+- [x] [exactly-once](../../concept/connector-v2-features.md)
+- [x] [cdc](../../concept/connector-v2-features.md)
+
+> Use `Xa transactions` to ensure `exactly-once`, so `exactly-once` is only supported for databases that support `Xa transactions`. You can set `is_exactly_once=true` to enable it.
+
+## Description
+
+Write data through jdbc.
+
+## Supported DataSource Info
+
+| datasource | supported versions | driver | url | maven |
+|------------|----------------------------------------------------------|---------------------------------|-----------------------------------------|------------------------------------------------------------------------------------|
+| redshift | Different dependency version has different driver class. | com.amazon.redshift.jdbc.Driver | jdbc:redshift://localhost:5439/database | [Download](https://mvnrepository.com/artifact/com.amazon.redshift/redshift-jdbc42) |
+
+## Database dependency
+
+> Please download the support list corresponding to 'Maven' and copy it to the '$SEATUNNEL_HOME/plugins/jdbc/lib/' working directory
+> For example Redshift datasource: cp RedshiftJDBC42-xxx.jar $SEATUNNEL_HOME/plugins/jdbc/lib/
+
+## Data Type Mapping
+
+| Redshift Data type | Seatunnel Data type |
+|-------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------|
+| SMALLINT
INT2 | SHORT |
+| INTEGER
INT
INT4 | INT |
+| BIGINT
INT8
OID | LONG |
+| DECIMAL
NUMERIC | DECIMAL((Get the designated column's specified column size)+1,
(Gets the designated column's number of digits to right of the decimal point.))) |
+| REAL
FLOAT4 | FLOAT |
+| DOUBLE_PRECISION
FLOAT8
FLOAT | DOUBLE |
+| BOOLEAN
BOOL | BOOLEAN |
+| CHAR
CHARACTER
NCHAR
BPCHAR
VARCHAR
CHARACTER_VARYING
NVARCHAR
TEXT
SUPER | STRING |
+| DATE | LOCALDATE |
+| TIME
TIME_WITH_TIME_ZONE
TIMETZ | LOCALTIME |
+| TIMESTAMP
TIMESTAMP_WITH_OUT_TIME_ZONE
TIMESTAMPTZ | LOCALDATETIME |
+| GEOMETRY | Not supported yet |
+
+## Options
+
+| name | type | required | default | description |
+|------------------------------|--------|----------|-----------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| url | String | Yes | - | The URL of the JDBC connection. Refer to a case: jdbc:redshift://localhost:5439/database |
+| driver | String | Yes | - | The jdbc class name used to connect to the remote data source,
if you use Redshift the value is `com.amazon.redshift.jdbc.Driver`. |
+| user | String | No | - | Connection instance user name |
+| password | String | No | - | Connection instance password |
+| query | String | Yes | - | Query statement |
+| connection_check_timeout_sec | Int | No | 30 | The time in seconds to wait for the database operation used to validate the connection to complete |
+| partition_column             | String | No       | -               | The column name for parallelism's partition. Only a numeric type primary key is supported, and only one column can be configured.                                                                                                                                  |
+| partition_lower_bound | Long | No | - | The partition_column min value for scan, if not set SeaTunnel will query database get min value. |
+| partition_upper_bound | Long | No | - | The partition_column max value for scan, if not set SeaTunnel will query database get max value. |
+| partition_num | Int | No | job parallelism | The number of partition count, only support positive integer. default value is job parallelism |
+| fetch_size                   | Int    | No       | 0               | For queries that return a large number of objects, you can configure
the row fetch size used in the query to improve performance by
reducing the number of database hits required to satisfy the selection criteria.
Zero means use jdbc default value.                                                                                                                               |
+| common-options | | No | - | Source plugin common parameters, please refer to [Source Common Options](common-options.md) for details |
+
+## tips
+
+> If partition_column is not set, it will run in single concurrency, and if partition_column is set, it will be executed in parallel according to the concurrency of tasks.
+
+## Task Example
+
+### simple:
+
+> This example defines a SeaTunnel synchronization task that automatically generates data through FakeSource and sends it to JDBC Sink. FakeSource generates a total of 16 rows of data (row.num=16), with each row having two fields, name (string type) and age (int type). The final target table, test_table, will also contain 16 rows of data. Before running this job, you need to create database mydatabase and table test_table in your Redshift. And if you have not yet installed and deployed SeaTunnel, you need to follow the instructions in [Install SeaTunnel](https://github.com/apache/incubator-seatunnel/blob/d013b5d2ac79f567077b62ceb4d247abf805ffdf/docs/en/start-v2/locally/deployment.md) to install and deploy SeaTunnel. And then follow the instructions in [Quick Start With SeaTunnel Engine](https://github.com/apache/incubator-seatunnel/blob/d013b5d2ac79f567077b62ceb4d247abf805ffdf/docs/en/start-v2/locally/quick-start-seatunnel-engine.md) to run this job.
+
+```
+# Defining the runtime environment
+env {
+ # You can set flink configuration here
+ execution.parallelism = 1
+ job.mode = "BATCH"
+}
+
+source {
+ # This is a example source plugin **only for test and demonstrate the feature source plugin**
+ FakeSource {
+ parallelism = 1
+ result_table_name = "fake"
+ row.num = 16
+ schema = {
+ fields {
+ name = "string"
+ age = "int"
+ }
+ }
+ }
+ # If you would like to get more information about how to configure seatunnel and see full list of source plugins,
+ # please go to https://seatunnel.apache.org/docs/category/source-v2
+}
+
+transform {
+ # If you would like to get more information about how to configure seatunnel and see full list of transform plugins,
+ # please go to https://seatunnel.apache.org/docs/category/transform-v2
+}
+
+sink {
+ jdbc {
+ url = "jdbc:redshift://localhost:5439/mydatabase"
+        driver = "com.amazon.redshift.jdbc.Driver"
+ user = "myUser"
+ password = "myPassword"
+ query = "insert into test_table(name,age) values(?,?)"
+ }
+ # If you would like to get more information about how to configure seatunnel and see full list of sink plugins,
+ # please go to https://seatunnel.apache.org/docs/category/sink-v2
+}
+```
+
+### Exactly-once :
+
+> For accurate write scenarios, we guarantee exactly-once semantics.
+
+```
+sink {
+ jdbc {
+ url = "jdbc:redshift://localhost:5439/mydatabase"
+ driver = "com.amazon.redshift.jdbc.Driver"
+ user = "myUser"
+ password = "myPassword"
+ max_retries = 0
+ query = "insert into mytable(name,age) values(?,?)"
+ is_exactly_once = true
+ xa_data_source_class_name = "com.amazon.redshift.jdbc.RedshiftXADataSource"
+ }
+}
+```
+
+### CDC(Change data capture) event
+
+> CDC change data is also supported by us. In this case, you need to configure schema, table and primary_keys.
+
+```
+sink {
+ jdbc {
+ url = "jdbc:redshift://localhost:5439/mydatabase"
+ driver = "com.amazon.redshift.jdbc.Driver"
+ user = "myUser"
+ password = "mypassword"
+
+ generate_sink_sql = true
+ # You need to configure both schema and table
+ schema = "public"
+ table = "sink_table"
+ primary_keys = ["id","name"]
+ }
+}
+```
+
diff --git a/docs/en/connector-v2/source/SQLServer.md b/docs/en/connector-v2/source/SQLServer.md
new file mode 100644
index 00000000000..662c522af33
--- /dev/null
+++ b/docs/en/connector-v2/source/SQLServer.md
@@ -0,0 +1,154 @@
+# SQL Server
+
+> JDBC SQL Server Source Connector
+
+## Support those engines
+
+> Spark
+> Flink
+> SeaTunnel Zeta
+
+## Key features
+
+- [x] [batch](../../concept/connector-v2-features.md)
+- [ ] [stream](../../concept/connector-v2-features.md)
+- [x] [exactly-once](../../concept/connector-v2-features.md)
+- [x] [column projection](../../concept/connector-v2-features.md)
+
+supports query SQL and can achieve projection effect.
+
+- [x] [parallelism](../../concept/connector-v2-features.md)
+- [x] [support user-defined split](../../concept/connector-v2-features.md)
+
+## Description
+
+Read external data source data through JDBC.
+
+## Supported DataSource Info
+
+| datasource | supported versions | driver | url | maven |
+|------------|-------------------------|----------------------------------------------|---------------------------------|-----------------------------------------------------------------------------------|
+| SQL Server | support version >= 2008 | com.microsoft.sqlserver.jdbc.SQLServerDriver | jdbc:sqlserver://localhost:1433 | [Download](https://mvnrepository.com/artifact/com.microsoft.sqlserver/mssql-jdbc) |
+
+## Database dependency
+
+> Please download the support list corresponding to 'Maven' and copy it to the '$SEATUNNEL_HOME/plugins/jdbc/lib/' working directory
+> For example SQL Server datasource: cp mssql-jdbc-xxx.jar $SEATUNNEL_HOME/plugins/jdbc/lib/
+
+## Data Type Mapping
+
+| SQLserver Data type | Seatunnel Data type |
+|-----------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------|
+| BIT | BOOLEAN |
+| TINYINT
SMALLINT | SHORT |
+| INTEGER | INT |
+| BIGINT | LONG |
+| DECIMAL
NUMERIC
MONEY
SMALLMONEY | DECIMAL((Get the designated column's specified column size)+1,
(Gets the designated column's number of digits to right of the
decimal point.))) |
+| REAL | FLOAT |
+| FLOAT | DOUBLE |
+| CHAR
NCHAR
VARCHAR
NTEXT
NVARCHAR
TEXT | STRING |
+| DATE | LOCAL_DATE |
+| TIME | LOCAL_TIME |
+| DATETIME
DATETIME2
SMALLDATETIME
DATETIMEOFFSET | LOCAL_DATE_TIME |
+| TIMESTAMP
BINARY
VARBINARY
IMAGE
UNKNOWN | Not supported yet |
+
+## Options
+
+| name | type | required | default | Description |
+|------------------------------|--------|----------|-----------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| url | String | Yes | - | The URL of the JDBC connection. Refer to a case: jdbc:sqlserver://localhost:1433 |
+| driver | String | Yes | - | The jdbc class name used to connect to the remote data source,
if you use SQLserver the value is `com.microsoft.sqlserver.jdbc.SQLServerDriver`. |
+| user | String | No | - | Connection instance user name |
+| password | String | No | - | Connection instance password |
+| query | String | Yes | - | Query statement |
+| connection_check_timeout_sec | Int | No | 30 | The time in seconds to wait for the database operation used to validate the connection to complete |
+| partition_column | String | No | - | The column name for parallelism's partition, only support numeric type. |
+| partition_lower_bound | Long | No | - | The partition_column min value for scan, if not set SeaTunnel will query database get min value. |
+| partition_upper_bound | Long | No | - | The partition_column max value for scan, if not set SeaTunnel will query database get max value. |
+| partition_num | Int | No | job parallelism | The number of partition count, only support positive integer. default value is job parallelism |
+| fetch_size                   | Int    | No       | 0               | For queries that return a large number of objects, you can configure
the row fetch size used in the query to improve performance by
reducing the number of database hits required to satisfy the selection criteria.
Zero means use jdbc default value.                                                                                                                               |
+| common-options | | No | - | Source plugin common parameters, please refer to [Source Common Options](common-options.md) for details |
+
+## tips
+
+> If partition_column is not set, it will run in single concurrency, and if partition_column is set, it will be executed in parallel according to the concurrency of tasks.
+
+## Task Example
+
+### simple:
+
+> This is to access your data source according to your query parameter. We can use this if we don't have a speed requirement.
+
+```
+# Defining the runtime environment
+env {
+# You can set SQLServer configuration here
+ execution.parallelism = 2
+ job.mode = "BATCH"
+}
+source {
+ Jdbc {
+ url = "jdbc:sqlserver://localhost:1433;databaseName=test"
+ driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
+ connection_check_timeout_sec = 100
+ user = "username"
+ password = "password"
+ query = "SELECT * FROM my_table"
+ }
+}
+
+transform {
+ # If you would like to get more information about how to configure seatunnel and see full list of transform plugins,
+ # please go to https://seatunnel.apache.org/docs/transform/sql
+}
+
+sink {
+ Console {}
+}
+
+```
+
+### parallel:
+
+> Read your query table in parallel with the shard field you configured and the shard data. You can do this if you want to read the whole table.
+
+```
+Jdbc {
+ url = "jdbc:sqlserver://localhost:1433;databaseName=myDatabase"
+ driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
+ connection_check_timeout_sec = 100
+ user = "myUsername"
+ password = "myPassword"
+ # Define query logic as required
+ query = "SELECT * FROM myTable"
+ # Parallel sharding reads fields
+ partition_column = "id"
+ # Number of fragments
+ partition_num = 10
+}
+```
+
+### parallel boundary:
+
+> It is more efficient to specify the data within the upper and lower bounds of the query. It is more efficient to read your data source according to the upper and lower boundaries you configured.
+
+```
+Jdbc {
+ url = "jdbc:sqlserver://localhost:1433;databaseName=myDatabase"
+ driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
+ connection_check_timeout_sec = 100
+ user = "myUsername"
+ password = "myPassword"
+ # Define query logic as required
+ query = "SELECT * FROM myTable"
+ partition_column = "id"
+ # Read start boundary
+ partition_lower_bound = 1
+ # Read end boundary
+ partition_upper_bound = 500
+ partition_num = 10
+}
+```
+
+## Changelog
+
diff --git a/docs/en/connector-v2/source/SqlServer-CDC.md b/docs/en/connector-v2/source/SqlServer-CDC.md
index 5b310a9471c..76c3ed16573 100644
--- a/docs/en/connector-v2/source/SqlServer-CDC.md
+++ b/docs/en/connector-v2/source/SqlServer-CDC.md
@@ -167,6 +167,35 @@ source {
Source plugin common parameters, please refer to [Source Common Options](common-options.md) for details.
+## notice
+
+1. CDC requires the SQL Server Agent service to be running. You can check its status using the following command.
+
+```Sqlserver
+EXEC xp_servicecontrol N'querystate',N'SQLServerAGENT';
+```
+
+2. Enable CDC at the database level.
+
+```Sqlserver
+USE TestDB;
+GO
+EXEC sys.sp_cdc_enable_db;
+GO
+```
+
+3. Enable CDC at the table level.
+
+```Sqlserver
+USE TestDB;
+EXEC sys.sp_cdc_enable_table
+@source_schema = 'your schema name',
+@source_name = 'your table name',
+@role_name = NULL,
+@supports_net_changes = 0;
+GO
+```
+
## Example
```Jdbc {
diff --git a/docs/en/connector-v2/source/redshift.md b/docs/en/connector-v2/source/redshift.md
new file mode 100644
index 00000000000..e4aa6539410
--- /dev/null
+++ b/docs/en/connector-v2/source/redshift.md
@@ -0,0 +1,147 @@
+# Redshift
+
+> JDBC Redshift Source Connector
+
+## Support those engines
+
+> Spark
+> Flink
+> SeaTunnel Zeta
+
+## Key features
+
+- [x] [batch](../../concept/connector-v2-features.md)
+- [ ] [stream](../../concept/connector-v2-features.md)
+- [x] [exactly-once](../../concept/connector-v2-features.md)
+- [x] [column projection](../../concept/connector-v2-features.md)
+- [x] [parallelism](../../concept/connector-v2-features.md)
+- [x] [support user-defined split](../../concept/connector-v2-features.md)
+
+> supports query SQL and can achieve projection effect.
+
+## Description
+
+Read external data source data through JDBC.
+
+## Supported DataSource Info
+
+| datasource | supported versions | driver | url | maven |
+|------------|----------------------------------------------------------|---------------------------------|-----------------------------------------|------------------------------------------------------------------------------------|
+| redshift | Different dependency version has different driver class. | com.amazon.redshift.jdbc.Driver | jdbc:redshift://localhost:5439/database | [Download](https://mvnrepository.com/artifact/com.amazon.redshift/redshift-jdbc42) |
+
+## Database dependency
+
+> Please download the support list corresponding to 'Maven' and copy it to the '$SEATUNNEL_HOME/plugins/jdbc/lib/' working directory
+> For example Redshift datasource: cp RedshiftJDBC42-xxx.jar $SEATUNNEL_HOME/plugins/jdbc/lib/
+
+## Data Type Mapping
+
+| Redshift Data type | Seatunnel Data type |
+|-------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------|
+| SMALLINT
INT2 | SHORT |
+| INTEGER
INT
INT4 | INT |
+| BIGINT
INT8
OID | LONG |
+| DECIMAL
NUMERIC | DECIMAL((Get the designated column's specified column size)+1,
(Gets the designated column's number of digits to right of the decimal point.))) |
+| REAL
FLOAT4 | FLOAT |
+| DOUBLE_PRECISION
FLOAT8
FLOAT | DOUBLE |
+| BOOLEAN
BOOL | BOOLEAN |
+| CHAR
CHARACTER
NCHAR
BPCHAR
VARCHAR
CHARACTER_VARYING
NVARCHAR
TEXT
SUPER | STRING |
+| DATE | LOCALDATE |
+| TIME
TIME_WITH_TIME_ZONE
TIMETZ | LOCALTIME |
+| TIMESTAMP
TIMESTAMP_WITH_OUT_TIME_ZONE
TIMESTAMPTZ | LOCALDATETIME |
+| GEOMETRY | Not supported yet |
+
+## Options
+
+| name | type | required | default | description |
+|------------------------------|--------|----------|-----------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| url | String | Yes | - | The URL of the JDBC connection. Refer to a case: jdbc:redshift://localhost:5439/database |
+| driver | String | Yes | - | The jdbc class name used to connect to the remote data source,
if you use Redshift the value is `com.amazon.redshift.jdbc.Driver`. |
+| user | String | No | - | Connection instance user name |
+| password | String | No | - | Connection instance password |
+| query | String | Yes | - | Query statement |
+| connection_check_timeout_sec | Int | No | 30 | The time in seconds to wait for the database operation used to validate the connection to complete |
+| partition_column             | String | No       | -               | The column name for parallelism's partition. Only a numeric type primary key is supported, and only one column can be configured.                                                                                                                                  |
+| partition_lower_bound | Long | No | - | The partition_column min value for scan, if not set SeaTunnel will query database get min value. |
+| partition_upper_bound | Long | No | - | The partition_column max value for scan, if not set SeaTunnel will query database get max value. |
+| partition_num | Int | No | job parallelism | The number of partition count, only support positive integer. default value is job parallelism |
+| fetch_size                   | Int    | No       | 0               | For queries that return a large number of objects, you can configure
the row fetch size used in the query to improve performance by
reducing the number of database hits required to satisfy the selection criteria.
Zero means use jdbc default value.                                                                                                                               |
+| common-options | | No | - | Source plugin common parameters, please refer to [Source Common Options](common-options.md) for details |
+
+## tips
+
+> If partition_column is not set, it will run in single concurrency, and if partition_column is set, it will be executed in parallel according to the concurrency of tasks.
+
+## Task Example
+
+### simple:
+
+> This example queries the 'mytable' table in your database (limited to 100 rows) in single parallelism and queries all of its fields. You can also specify which fields to query for final output to the console.
+
+```
+# Defining the runtime environment
+env {
+ execution.parallelism = 2
+ job.mode = "BATCH"
+}
+
+source{
+ Jdbc {
+ url = "jdbc:redshift://localhost:5439/database?user=myuser&password=mypassword"
+ driver = "com.amazon.redshift.jdbc.Driver"
+ query = "SELECT * FROM mytable LIMIT 100"
+ connectTimeout = 30
+ }
+}
+
+transform {
+ # If you would like to get more information about how to configure seatunnel and see full list of transform plugins,
+ # please go to https://seatunnel.apache.org/docs/transform/sql
+}
+
+sink {
+ Console {}
+}
+```
+
+### parallel:
+
+> Read your query table in parallel with the shard field you configured and the shard data. You can do this if you want to read the whole table.
+
+```
+source {
+ Jdbc {
+ url = "jdbc:redshift://localhost:5439/database?user=myuser&password=mypassword"
+ driver = "com.amazon.redshift.jdbc.Driver"
+ # Define query logic as required
+ query = "SELECT * FROM myTable"
+ # Parallel sharding reads fields
+ partition_column = "id"
+ # Number of fragments
+ partition_num = 10
+ }
+}
+```
+
+### parallel boundary:
+
+> It is more efficient to specify the data within the upper and lower bounds of the query. It is more efficient to read your data source according to the upper and lower boundaries you configured.
+
+```
+source {
+ Jdbc {
+ url = "jdbc:redshift://localhost:5439/database?user=myuser&password=mypassword"
+ driver = "com.amazon.redshift.jdbc.Driver"
+ connectTimeout = 30
+ # Define query logic as required
+ query = "select * from mytable"
+ partition_column = "id"
+ # Read start boundary
+ partition_lower_bound = 1
+ # Read end boundary
+ partition_upper_bound = 500
+ partition_num = 10
+ }
+}
+```
+