From fe214a08c5336fa20cba94efef4db609dbffa238 Mon Sep 17 00:00:00 2001 From: rusher Date: Tue, 10 Nov 2020 12:27:55 +0100 Subject: [PATCH 01/21] [misc] ensure pipe test compatibility with all node.js version --- test/integration/test-socket.js | 2 -- 1 file changed, 2 deletions(-) diff --git a/test/integration/test-socket.js b/test/integration/test-socket.js index f89c17ab..94fa257d 100644 --- a/test/integration/test-socket.js +++ b/test/integration/test-socket.js @@ -63,8 +63,6 @@ describe('test socket', () => { }) .catch((err) => { assert(err.message.includes('connect ENOENT \\\\.\\pipe\\')); - assert.equal(err.errno, 'ENOENT'); - assert.equal(err.code, 'ENOENT'); done(); }); }) From f52f26bc4d6f32fd579b2cd29c8c8c2fee713531 Mon Sep 17 00:00:00 2001 From: rusher Date: Tue, 10 Nov 2020 12:28:52 +0100 Subject: [PATCH 02/21] [CONJS-151] bulk batch error (parameter truncation) #137 --- lib/io/bulk-packet.js | 2 +- test/integration/test-batch.js | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/lib/io/bulk-packet.js b/lib/io/bulk-packet.js index a15f749e..f557aed2 100644 --- a/lib/io/bulk-packet.js +++ b/lib/io/bulk-packet.js @@ -353,7 +353,7 @@ class BulkPacket { let flushed = false; let byteLength = Buffer.byteLength(str, this.encoding); if (byteLength + 9 > this.buf.length - this.pos) { - if (this.buf.length < MAX_BUFFER_SIZE) flushed = this.growBuffer(byteLength); + if (this.buf.length < MAX_BUFFER_SIZE) flushed = this.growBuffer(byteLength + 9); if (byteLength > this.buf.length - this.pos) { //not enough space in buffer, will stream : diff --git a/test/integration/test-batch.js b/test/integration/test-batch.js index 19d4b999..5a1ad85f 100644 --- a/test/integration/test-batch.js +++ b/test/integration/test-batch.js @@ -1276,6 +1276,15 @@ describe('batch', () => { }; describe('standard question mark using bulk', () => { + it('ensure bulk param length encoded size #137', async function () { + if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); + await shareConn.query('DROP TABLE IF EXISTS bufLength'); + await shareConn.query('create table bufLength (val TEXT not null, val2 varchar(10))'); + await shareConn.batch('update bufLength set val=?, val2=?', [ + [Buffer.alloc(16366).toString(), 'abc'] + ]); + }); + const useCompression = false; it('simple batch, local date', function (done) { if (process.env.SKYSQL || !base.utf8Collation()) { From 381f3c50f19ffb2498af89f0c31c89f59d980134 Mon Sep 17 00:00:00 2001 From: rusher Date: Tue, 10 Nov 2020 15:43:53 +0100 Subject: [PATCH 03/21] [misc] test correction, avoiding to reuse existing table --- test/integration/test-error.js | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/integration/test-error.js b/test/integration/test-error.js index 9e6ef9a0..788cf4e6 100644 --- a/test/integration/test-error.js +++ b/test/integration/test-error.js @@ -586,12 +586,12 @@ describe('Error', () => { .createConnection({ compress: true }) .then((conn) => { conn - .query('DROP TABLE IF EXISTS execute_missing_parameter') + .query('DROP TABLE IF EXISTS execute_missing_parameter2') .then(() => { - return conn.query('CREATE TABLE execute_missing_parameter (id int, id2 int, id3 int)'); + return conn.query('CREATE TABLE execute_missing_parameter2 (id int, id2 int, id3 int)'); }) .then(() => { - return conn.query('INSERT INTO execute_missing_parameter values (?, ?, ?)', [1, 3]); + return conn.query('INSERT INTO execute_missing_parameter2 values (?, ?, ?)', [1, 3]); }) .then(() => { 
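              // the INSERT above omits its third placeholder value, so reaching
              // this resolve handler means the expected error was never raised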
done(new Error('must have thrown error !')); @@ -604,7 +604,7 @@ describe('Error', () => { assert.ok( err.message.includes( 'Parameter at position 3 is not set\n' + - 'sql: INSERT INTO execute_missing_parameter values (?, ?, ?) - parameters:[1,3]' + 'sql: INSERT INTO execute_missing_parameter2 values (?, ?, ?) - parameters:[1,3]' ) ); return conn From 2028045fe208205ff52e1dbe30530f1081d504cb Mon Sep 17 00:00:00 2001 From: rusher Date: Tue, 10 Nov 2020 15:53:29 +0100 Subject: [PATCH 04/21] [misc] removing warning: please use IANA standard timezone format, since supported. --- lib/config/connection-options.js | 6 ------ 1 file changed, 6 deletions(-) diff --git a/lib/config/connection-options.js b/lib/config/connection-options.js index ec0067e4..fadfe193 100644 --- a/lib/config/connection-options.js +++ b/lib/config/connection-options.js @@ -122,12 +122,6 @@ class ConnectionOptions { ); } tzName = 'Etc/GMT' + (matched[1] === '-' ? '+' : '') + hour; - console.log( - "warning: please use IANA standard timezone format ('Etc/GMT" + - (matched[1] === '-' ? '+' : '') + - hour + - "')" - ); } } this.localTz = moment.tz.guess(); From ffdb0741bef32f7ea68017ede22b0a9f69c40948 Mon Sep 17 00:00:00 2001 From: rusher Date: Thu, 12 Nov 2020 18:53:10 +0100 Subject: [PATCH 05/21] [CONJS-152] correction when enabling the `permitLocalInfile` option and initial commands Issue was that connection will hang after authentication. --- lib/connection.js | 4 ++-- lib/const/connection_status.js | 7 ++++--- test/integration/test-local-infile.js | 15 +++++++++++++++ 3 files changed, 21 insertions(+), 5 deletions(-) diff --git a/lib/connection.js b/lib/connection.js index 8ba4ae10..e50a07bd 100644 --- a/lib/connection.js +++ b/lib/connection.js @@ -983,7 +983,7 @@ function Connection(options) { if (!err.fatal) this.end().catch((err) => {}); process.nextTick(rejected, err); }; - + _status = Status.INIT_CMD; _executeSessionVariableQuery() .then(() => { return _checkServerTimezone(); @@ -1182,7 +1182,7 @@ function Connection(options) { }); //send immediately only if no current active receiver - if (_sendQueue.isEmpty() && _status === Status.CONNECTED) { + if (_sendQueue.isEmpty() && (_status === Status.INIT_CMD || _status === Status.CONNECTED)) { if (_receiveQueue.peekFront()) { _receiveQueue.push(cmd); _sendQueue.push(cmd); diff --git a/lib/const/connection_status.js b/lib/const/connection_status.js index f5fc3cee..82334eba 100644 --- a/lib/const/connection_status.js +++ b/lib/const/connection_status.js @@ -4,9 +4,10 @@ const Status = { NOT_CONNECTED: 1, CONNECTING: 2, AUTHENTICATING: 3, - CONNECTED: 4, - CLOSING: 5, - CLOSED: 6 + INIT_CMD: 4, + CONNECTED: 5, + CLOSING: 6, + CLOSED: 7 }; module.exports.Status = Status; diff --git a/test/integration/test-local-infile.js b/test/integration/test-local-infile.js index ccee8a4f..3b9da050 100644 --- a/test/integration/test-local-infile.js +++ b/test/integration/test-local-infile.js @@ -75,6 +75,21 @@ describe('local-infile', () => { .catch(done); }); + it('local infile and init functions', function (done) { + base + .createConnection({ permitLocalInfile: true, initSql: "set time_zone='+00:00'" }) + .then((conn) => { + conn + .query('SELECT 1') + .then(() => { + conn.end(); + done(); + }) + .catch(done); + }) + .catch(done); + }); + it('local infile disable using default options', function (done) { base .createConnection({ pipelining: undefined, permitLocalInfile: undefined }) From f311a13a0267455d768e0444ebecccb9bebfea5c Mon Sep 17 00:00:00 2001 From: rusher Date: Fri, 13 Nov 
2020 17:57:12 +0100
Subject: [PATCH 06/21] [CONJS-154] Timezone support correction and clarification

Current timezone support has 2 issues:
* use of moment.setDefault, which can be changed by the environment.
* when using timezone, the connection default timezone is not set, possibly
  causing issues with functions sensitive to timezone, like NOW()

The connection timezone is now set by default. If the server doesn't recognize
the timezone format, a warning is issued. This step can be disabled using the
`skipSetTimezone` option
---
 documentation/callback-api.md              |  40 ++++++-
 documentation/promise-api.md               |  42 ++++++-
 lib/cmd/common-text-cmd.js                 |  23 +++-
 lib/config/connection-options.js           |  12 +-
 lib/connection.js                          | 129 +++++++++++----------
 lib/io/bulk-packet.js                      |  38 ++++--
 lib/io/packet.js                           |   5 +-
 lib/misc/utils.js                          |   4 +-
 package.json                               |   2 +-
 test/integration/datatype/test-datetime.js |  23 ++++
 test/integration/test-connection-opts.js   |  27 +++++
 11 files changed, 252 insertions(+), 93 deletions(-)

diff --git a/documentation/callback-api.md b/documentation/callback-api.md
index 2ce22889..a7ade24b 100644
--- a/documentation/callback-api.md
+++ b/documentation/callback-api.md
@@ -46,16 +46,44 @@ This initializes the constant `mariadb`, which is set to use the Callback API ra
 ## Timezone consideration
 
-It's not recommended, but in some cases, Node.js and database are configured with different timezone.
+Client and database can have different timezones.
 
-By default, `timezone` option is set to 'local' value, indicating to use client timezone, so no conversion will be done.
+The connector offers several ways to handle this.
+The `timezone` option can take the following values:
+* 'local' (default): the connector does no conversion. If the database uses a different timezone, dates will be offset.
+* 'auto': the connector retrieves the server timezone when creating the connection. Dates are converted whenever the server timezone differs from the client's.
+* an IANA timezone or offset, for example 'America/New_York' or '+06:00'.
 
-If client and server timezone differ, `timezone` option has to be set to server timezone.
-- 'auto' value means client will request server timezone when creating a connection, and use server timezone afterwhile.
-- To avoid this additional command on connection, `timezone` can be set to [IANA time zone](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones).
+##### IANA timezone / offset
 
-Connector will then convert date to server timezone, rather than the current Node.js timezone.
+When using an IANA timezone, the connector sets the connection timezone accordingly.
+This can throw an error on connection if the timezone is unknown to the server (see the [mariadb timezone documentation](https://mariadb.com/kb/en/time-zones/); the timezone tables might not be initialized).
+If you are sure the server already uses that timezone, this step can be skipped with the `skipSetTimezone` option.
+If the timezone corresponds to the JavaScript default timezone, no conversion is done.
+
+##### Timezone setting recommendation
+The best option is to use the same timezone on client and database, and keep the default 'local' value.
+
+If they differ, either the client or the server has to convert dates.
+In general, client-side conversion is preferable, to avoid putting unneeded load on the database.
+Set `timezone` to the IANA timezone corresponding to the server timezone, and enable the `skipSetTimezone` option (skipping the connection-time `SET time_zone` command), since you are sure the server already uses that timezone.
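+
+A quick way to find the right value is to ask the server which timezone it reports. Below is a minimal sketch using the callback API (assuming, as in the configuration example further down, that credentials are provided through environment variables):
+
+```js
+const mariadb = require('mariadb/callback');
+
+const conn = mariadb.createConnection({
+  host: process.env.DB_HOST,
+  user: process.env.DB_USER,
+  password: process.env.DB_PWD
+});
+// when @@time_zone is 'SYSTEM', the effective timezone is @@system_time_zone
+conn.query('SELECT @@system_time_zone stz, @@time_zone tz', (err, rows) => {
+  if (err) throw err;
+  console.log(rows[0].tz === 'SYSTEM' ? rows[0].stz : rows[0].tz);
+  conn.end();
+});
+```
+This is the same query the connector itself issues when `timezone: 'auto'` is set.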
+
+Example: the client uses 'America/New_York' by default, and the server 'America/Los_Angeles'.
+Execute 'SELECT @@system_time_zone' on the server (as sketched above); that gives the server's default timezone.
+The server can return a POSIX timezone abbreviation like 'PDT' (Pacific Daylight Time).
+The corresponding IANA timezone must then be found (see the [IANA timezone list](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones)) and configured client-side.
+This ensures DST (daylight saving time) changes are handled automatically.
+```js
+const mariadb = require('mariadb');
+const conn = mariadb.createConnection({
+  host: process.env.DB_HOST,
+  user: process.env.DB_USER,
+  password: process.env.DB_PWD,
+  timezone: 'America/Los_Angeles',
+  skipSetTimezone: true
+});
+```
 
 ## Security consideration
 
diff --git a/documentation/promise-api.md b/documentation/promise-api.md
index 6b095314..50f836d1 100644
--- a/documentation/promise-api.md
+++ b/documentation/promise-api.md
@@ -54,17 +54,47 @@ const mariadb = require('mariadb');
 
 ### Timezone consideration
 
-It's not recommended, but in some cases, Node.js and database are configured with different timezone.
+Client and database can have different timezones.
 
-By default, `timezone` option is set to 'local' value, indicating to use client timezone, so no conversion will be done.
+The connector offers several ways to handle this.
+The `timezone` option can take the following values:
+* 'local' (default): the connector does no conversion. If the database uses a different timezone, dates will be offset.
+* 'auto': the connector retrieves the server timezone when creating the connection. Dates are converted whenever the server timezone differs from the client's.
+* an IANA timezone or offset, for example 'America/New_York' or '+06:00'.
 
-If client and server timezone differ, `timezone` option has to be set to server timezone.
-- 'auto' value means client will request server timezone when creating a connection, and use server timezone afterwhile.
-- To avoid this additional command on connection, `timezone` can be set to [IANA time zone](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones).
+##### IANA timezone / offset
 
-Connector will then convert date to server timezone, rather than the current Node.js timezone.
+When using an IANA timezone, the connector sets the connection timezone accordingly.
+This can throw an error on connection if the timezone is unknown to the server (see the [mariadb timezone documentation](https://mariadb.com/kb/en/time-zones/); the timezone tables might not be initialized).
+If you are sure the server already uses that timezone, this step can be skipped with the `skipSetTimezone` option.
+If the timezone corresponds to the JavaScript default timezone, no conversion is done.
+
+##### Timezone setting recommendation
+The best option is to use the same timezone on client and database, and keep the default 'local' value.
+
+If they differ, either the client or the server has to convert dates.
+In general, client-side conversion is preferable, to avoid putting unneeded load on the database.
+Set `timezone` to the IANA timezone corresponding to the server timezone, and enable the `skipSetTimezone` option (skipping the connection-time `SET time_zone` command), since you are sure the server already uses that timezone.
+
+Example: the client uses 'America/New_York' by default, and the server 'America/Los_Angeles'.
+Execute 'SELECT @@system_time_zone' on the server; that gives the server's default timezone.
+The server can return a POSIX timezone abbreviation like 'PDT' (Pacific Daylight Time).
+IANA timezone correspondence must be found : (see [IANA timezone List](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones)) and configure client-side. +This will ensure DST (automatic date saving time change will be handled) + +```js +const mariadb = require('mariadb'); +const conn = mariadb.createConnection({ + host: process.env.DB_HOST, + user: process.env.DB_USER, + password: process.env.DB_PWD, + timezone: 'America/Los_Angeles', + skipSetTimezone: true +}); +``` + + ### Security consideration Connection details such as URL, username, and password are better hidden into environment variables. diff --git a/lib/cmd/common-text-cmd.js b/lib/cmd/common-text-cmd.js index e673c56d..0eb6f249 100644 --- a/lib/cmd/common-text-cmd.js +++ b/lib/cmd/common-text-cmd.js @@ -4,6 +4,7 @@ const ResultSet = require('./resultset'); const FieldDetail = require('../const/field-detail'); const FieldType = require('../const/field-type'); const Long = require('long'); +const moment = require('moment-timezone'); const QUOTE = 0x27; class CommonText extends ResultSet { @@ -12,7 +13,11 @@ class CommonText extends ResultSet { this.configAssign(connOpts, cmdOpts); this.sql = sql; this.initialValues = values; - this.getDateQuote = this.opts.tz ? CommonText.getTimezoneDate : CommonText.getLocalDate; + this.getDateQuote = this.opts.tz + ? this.opts.tz === 'Etc/UTC' + ? CommonText.getUtcDate + : CommonText.getTimezoneDate + : CommonText.getLocalDate; } /** @@ -398,13 +403,25 @@ function getLocalDate(date, opts) { return getDatePartQuote(year, mon, day, hour, min, sec, ms); } +function getUtcDate(date, opts) { + const year = date.getUTCFullYear(); + const mon = date.getUTCMonth() + 1; + const day = date.getUTCDate(); + const hour = date.getUTCHours(); + const min = date.getUTCMinutes(); + const sec = date.getUTCSeconds(); + const ms = date.getUTCMilliseconds(); + return getDatePartQuote(year, mon, day, hour, min, sec, ms); +} + function getTimezoneDate(date, opts) { if (date.getMilliseconds() != 0) { - return opts.tz(date).format("'YYYY-MM-DD HH:mm:ss.SSS'"); + return moment.tz(date, opts.tz).format("'YYYY-MM-DD HH:mm:ss.SSS'"); } - return opts.tz(date).format("'YYYY-MM-DD HH:mm:ss'"); + return moment.tz(date, opts.tz).format("'YYYY-MM-DD HH:mm:ss'"); } module.exports = CommonText; module.exports.getTimezoneDate = getTimezoneDate; +module.exports.getUtcDate = getUtcDate; module.exports.getLocalDate = getLocalDate; diff --git a/lib/config/connection-options.js b/lib/config/connection-options.js index fadfe193..d8078357 100644 --- a/lib/config/connection-options.js +++ b/lib/config/connection-options.js @@ -105,6 +105,7 @@ class ConnectionOptions { this.supportBigNumbers = opts.supportBigNumbers || false; this.supportBigInt = opts.supportBigInt || false; this.timezone = opts.timezone || 'local'; + this.skipSetTimezone = opts.skipSetTimezone || false; if (this.timezone && this.timezone !== 'local' && this.timezone !== 'auto') { let tzName = this.timezone; if (this.timezone === 'Z') { @@ -121,15 +122,19 @@ class ConnectionOptions { "'" ); } - tzName = 'Etc/GMT' + (matched[1] === '-' ? '+' : '') + hour; + if (hour == 0) { + tzName = 'Etc/UTC'; + } else { + tzName = 'Etc/GMT' + (matched[1] === '-' ? 
'+' : '') + hour; + } } } this.localTz = moment.tz.guess(); if (tzName === this.localTz) { this.tz = null; } else { - this.tz = moment.tz.setDefault(tzName); - if (!this.tz.defaultZone) { + this.tz = tzName; + if (!moment.tz.zone(tzName)) { throw Errors.createError( "Unknown IANA timezone '" + tzName + "'.", true, @@ -181,6 +186,7 @@ class ConnectionOptions { if (opts.debug) opts.debug = opts.debug == 'true'; if (opts.autoJsonMap) opts.autoJsonMap = opts.autoJsonMap == 'true'; if (opts.arrayParenthesis) opts.arrayParenthesis = opts.arrayParenthesis == 'true'; + if (opts.skipSetTimezone) opts.skipSetTimezone = opts.skipSetTimezone == 'true'; if (opts.checkDuplicate) opts.checkDuplicate = opts.checkDuplicate == 'true'; if (opts.debugCompress) opts.debugCompress = opts.debugCompress == 'true'; diff --git a/lib/connection.js b/lib/connection.js index e50a07bd..77b7c8c8 100644 --- a/lib/connection.js +++ b/lib/connection.js @@ -156,7 +156,7 @@ function Connection(options) { * @param values object / array of placeholder values (not mandatory) * @returns {Promise} promise */ - this.query = (sql, values) => { + this._queryPromise = (sql, values) => { let _cmdOpt, _sql, _values = values; @@ -784,41 +784,63 @@ function Connection(options) { * @private */ const _checkServerTimezone = () => { - if (opts.timezone !== 'auto') { - return Promise.resolve(); - } - return this.query('SELECT @@system_time_zone stz, @@time_zone tz').then((res) => { - const serverTimezone = res[0].tz === 'SYSTEM' ? res[0].stz : res[0].tz; - const serverZone = moment.tz.zone(serverTimezone); - if (serverZone) { - if (serverTimezone === moment.tz.guess()) { - //db server and client use same timezone, avoid any conversion - opts.tz = null; + if (opts.timezone === 'auto') { + return this._queryPromise('SELECT @@system_time_zone stz, @@time_zone tz').then((res) => { + const serverTimezone = res[0].tz === 'SYSTEM' ? res[0].stz : res[0].tz; + const serverZone = moment.tz.zone(serverTimezone); + if (serverZone) { + const localTz = moment.tz.guess(); + if (serverTimezone === localTz) { + //db server and client use same timezone, avoid any conversion + opts.tz = null; + } else { + opts.localTz = localTz; + opts.tz = serverTimezone; + } } else { - opts.tz = moment.tz.setDefault(serverTimezone); + return Promise.reject( + Errors.createError( + "Automatic timezone setting fails. Server timezone '" + + serverTimezone + + "' does't have a corresponding IANA timezone. Option timezone must be set according to server timezone", + true, + info, + '08S01', + Errors.ER_WRONG_AUTO_TIMEZONE + ) + ); } - } else { - return Promise.reject( - Errors.createError( - "Automatic timezone setting fails. Server timezone '" + - serverTimezone + - "' does't have a corresponding IANA timezone. 
Option timezone must be set according to server timezone", - true, - info, - '08S01', - Errors.ER_WRONG_AUTO_TIMEZONE - ) - ); + return Promise.resolve(); + }); + } + if (opts.tz && !opts.skipSetTimezone) { + let tz = opts.tz; + if (opts.tz === 'Etc/UTC') { + tz = '+00:00'; + } else if (opts.tz.startsWith('Etc/GMT')) { + let zone = moment.tz.zone(opts.tz); + tz = zone.abbrs[0] + ':00'; } - return Promise.resolve(); - }); + + return this._queryPromise('SET time_zone=?', tz) + .then((res) => { + return Promise.resolve(); + }) + .catch((err) => { + console.log( + `warning: setting timezone '${opts.tz}' fails on server.\n look at https://mariadb.com/kb/en/mysql_tzinfo_to_sql/ to load IANA timezone.\nSetting timezone can be disabled with option \`skipSetTimezone\`` + ); + return Promise.resolve(); + }); + } + return Promise.resolve(); }; const _checkServerVersion = () => { if (!opts.forceVersionCheck) { return Promise.resolve(); } - return this.query('SELECT @@VERSION AS v').then((res) => { + return this._queryPromise('SELECT @@VERSION AS v').then((res) => { info.serverVersion.raw = res[0].v; info.serverVersion.mariaDb = info.serverVersion.raw.includes('MariaDB'); ConnectionInformation.parseVersionString(info); @@ -831,27 +853,21 @@ function Connection(options) { const initialArr = Array.isArray(opts.initSql) ? opts.initSql : [opts.initSql]; const initialPromises = []; initialArr.forEach((sql) => { - initialPromises.push( - new Promise(function (resolve, reject) { - const errorHandling = (initialErr) => { - reject( - Errors.createError( - 'Error executing initial sql command: ' + initialErr.message, - true, - info, - '08S01', - Errors.ER_INITIAL_SQL_ERROR, - null - ) - ); - }; - const cmd = new Query(resolve, errorHandling, null, opts, sql, null); - if (opts.trace) Error.captureStackTrace(cmd); - _addCommand(cmd); - }) + initialPromises.push(this._queryPromise(sql)); + }); + + return Promise.all(initialPromises).catch((initialErr) => { + return Promise.reject( + Errors.createError( + 'Error executing initial sql command: ' + initialErr.message, + true, + info, + '08S01', + Errors.ER_INITIAL_SQL_ERROR, + null + ) ); }); - return Promise.all(initialPromises); } return Promise.resolve(); }; @@ -859,9 +875,9 @@ function Connection(options) { const _executeSessionTimeout = () => { if (opts.queryTimeout) { if (info.isMariaDB() && info.hasMinVersion(10, 1, 2)) { - return new Promise(function (resolve, reject) { - const errorHandling = (initialErr) => { - reject( + this._queryPromise('SET max_statement_time=' + opts.queryTimeout / 1000).catch( + (initialErr) => { + return Promise.reject( Errors.createError( 'Error setting session queryTimeout: ' + initialErr.message, true, @@ -871,18 +887,8 @@ function Connection(options) { null ) ); - }; - const cmd = new Query( - resolve, - errorHandling, - null, - opts, - 'SET max_statement_time=' + opts.queryTimeout / 1000, - null - ); - if (opts.trace) Error.captureStackTrace(cmd); - _addCommand(cmd); - }); + } + ); } else { return Promise.reject( Errors.createError( @@ -1407,6 +1413,7 @@ function Connection(options) { let _out = new PacketOutputStream(opts, info); let _in = new PacketInputStream(_unexpectedPacket.bind(this), _receiveQueue, _out, opts, info); + this.query = this._queryPromise; this.escape = Utils.escape.bind(this, opts, info); this.escapeId = Utils.escapeId.bind(this, opts, info); diff --git a/lib/io/bulk-packet.js b/lib/io/bulk-packet.js index f557aed2..8a42c27e 100644 --- a/lib/io/bulk-packet.js +++ b/lib/io/bulk-packet.js @@ -1,5 +1,6 @@ 'use 
strict'; +const moment = require('moment-timezone'); const Iconv = require('iconv-lite'); const SMALL_BUFFER_SIZE = 1024; const MEDIUM_BUFFER_SIZE = 16384; //16k @@ -27,7 +28,11 @@ class BulkPacket { this.waitingResponseNo = 1; this.singleQuery = false; this.haveErrorResponse = false; - this.writeBinaryDate = opts.tz ? this.writeBinaryTimezoneDate : this.writeBinaryLocalDate; + this.writeBinaryDate = opts.tz + ? opts.tz === 'Etc/UTC' + ? this.writeBinaryUtcDate + : this.writeBinaryTimezoneDate + : this.writeBinaryLocalDate; if (this.encoding === 'utf8') { this.writeLengthEncodedString = this.writeDefaultLengthEncodedString; } else if (Buffer.isEncoding(this.encoding)) { @@ -377,6 +382,17 @@ class BulkPacket { return this._writeBinaryDate(year, mon, day, hour, min, sec, ms); } + writeBinaryUtcDate(date, opts) { + const year = date.getUTCFullYear(); + const mon = date.getUTCMonth() + 1; + const day = date.getUTCDate(); + const hour = date.getUTCHours(); + const min = date.getUTCMinutes(); + const sec = date.getUTCSeconds(); + const ms = date.getUTCMilliseconds(); + return this._writeBinaryDate(year, mon, day, hour, min, sec, ms); + } + _writeBinaryDate(year, mon, day, hour, min, sec, ms) { let len = ms === 0 ? 7 : 11; //not enough space remaining @@ -423,16 +439,16 @@ class BulkPacket { } writeBinaryTimezoneDate(date, opts) { - const formated = opts.tz(date).format('YYYY-MM-DD HH:mm:ss.SSSSSS'); - const dateZoned = new Date(formated + 'Z'); - - const year = dateZoned.getUTCFullYear(); - const mon = dateZoned.getUTCMonth() + 1; - const day = dateZoned.getUTCDate(); - const hour = dateZoned.getUTCHours(); - const min = dateZoned.getUTCMinutes(); - const sec = dateZoned.getUTCSeconds(); - const ms = dateZoned.getUTCMilliseconds(); + const dateZoned = new Date( + moment.tz(date, opts.localTz).tz(opts.tz).format('YYYY-MM-DD HH:mm:ss.SSSSSS') + ); + const year = dateZoned.getFullYear(); + const mon = dateZoned.getMonth() + 1; + const day = dateZoned.getDate(); + const hour = dateZoned.getHours(); + const min = dateZoned.getMinutes(); + const sec = dateZoned.getSeconds(); + const ms = dateZoned.getMilliseconds(); return this._writeBinaryDate(year, mon, day, hour, min, sec, ms); } diff --git a/lib/io/packet.js b/lib/io/packet.js index 62ce9a16..ddcb5d2f 100644 --- a/lib/io/packet.js +++ b/lib/io/packet.js @@ -3,6 +3,7 @@ const Errors = require('../misc/errors'); const Iconv = require('iconv-lite'); const Long = require('long'); +const moment = require('moment-timezone'); /** * Object to easily parse buffer. @@ -361,7 +362,9 @@ class Packet { if (str.startsWith('0000-00-00 00:00:00')) return null; if (opts.tz) { - return new Date(opts.tz(str).clone().tz(opts.localTz).format('YYYY-MM-DD HH:mm:ss.SSSSSS')); + return new Date( + moment.tz(str, opts.tz).clone().tz(opts.localTz).format('YYYY-MM-DD HH:mm:ss.SSSSSS') + ); } return new Date(str); } diff --git a/lib/misc/utils.js b/lib/misc/utils.js index 2b6e1640..ace3bfc2 100644 --- a/lib/misc/utils.js +++ b/lib/misc/utils.js @@ -128,7 +128,9 @@ module.exports.escape = (opts, info, value) => { case 'object': if (Object.prototype.toString.call(value) === '[object Date]') { return opts.tz - ? CommonText.getTimezoneDate(value, opts) + ? opts.tz === 'Etc/UTC' + ? 
CommonText.getUtcDate(value, opts) + : CommonText.getTimezoneDate(value, opts) : CommonText.getLocalDate(value, opts); } else if (Buffer.isBuffer(value)) { let stValue; diff --git a/package.json b/package.json index 69342041..d6835243 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "mariadb", - "version": "2.5.1", + "version": "2.6.0", "description": "fast mariadb/mysql connector.", "main": "promise.js", "types": "types/index.d.ts", diff --git a/test/integration/datatype/test-datetime.js b/test/integration/datatype/test-datetime.js index 4527f8e0..85250490 100644 --- a/test/integration/datatype/test-datetime.js +++ b/test/integration/datatype/test-datetime.js @@ -110,6 +110,29 @@ describe('datetime', () => { .catch(done); }); + it('date text America/New_York timezone', function (done) { + const date = new Date('1999-01-31 12:13:14'); + if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6)) this.skip(); + base + .createConnection({ timezone: 'America/New_York'}) + .then((conn) => { + conn + .query({ sql: 'select CAST(? as datetime) d' }, [date]) + .then((res) => { + assert.equal(Object.prototype.toString.call(res[0].d), '[object Date]'); + assert.equal(res[0].d.getDate(), date.getDate()); + assert.equal(res[0].d.getHours(), date.getHours()); + assert.equal(res[0].d.getMinutes(), date.getMinutes()); + assert.equal(res[0].d.getSeconds(), date.getSeconds()); + conn.close(); + done(); + }) + .catch(done); + }) + .catch(done); + }); + + it('date text from row', function (done) { if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6)) this.skip(); shareConn diff --git a/test/integration/test-connection-opts.js b/test/integration/test-connection-opts.js index b53dca5d..62c1f8f2 100644 --- a/test/integration/test-connection-opts.js +++ b/test/integration/test-connection-opts.js @@ -96,6 +96,33 @@ describe('connection option', () => { .catch(done); }); + it('timezone +0h', function (done) { + base + .createConnection({ timezone: '+00:00' }) + .then((conn) => { + let d = new Date('2000-01-01T00:00:00Z'); + conn + .query('SELECT UNIX_TIMESTAMP(?) 
tt', [d]) + .then((res) => { + assert.deepEqual(res[0].tt, d.getTime() / 1000); + return conn.query( + "SELECT TIMESTAMP('2003-12-31 12:00:00') tt1, FROM_UNIXTIME(UNIX_TIMESTAMP(?)) tt2", + [d] + ); + }) + .then((res) => { + assert.deepEqual(res[0].tt1, new Date('2003-12-31T12:00:00Z')); + assert.deepEqual(res[0].tt2, d); + return conn.end(); + }) + .then(() => { + done(); + }) + .catch(done); + }) + .catch(done); + }); + it('timezone +2h', function (done) { base .createConnection({ timezone: '+02' }) From 1206ad0af789d02f60c34d97628b8b0e78056003 Mon Sep 17 00:00:00 2001 From: rusher Date: Mon, 16 Nov 2020 10:55:07 +0100 Subject: [PATCH 07/21] [misc] dependency version update, style correction --- lib/connection.js | 18 ++++----- package.json | 18 ++++----- test/integration/datatype/test-datetime.js | 43 +++++++++++----------- 3 files changed, 39 insertions(+), 40 deletions(-) diff --git a/lib/connection.js b/lib/connection.js index 77b7c8c8..eef771e5 100644 --- a/lib/connection.js +++ b/lib/connection.js @@ -823,15 +823,15 @@ function Connection(options) { } return this._queryPromise('SET time_zone=?', tz) - .then((res) => { - return Promise.resolve(); - }) - .catch((err) => { - console.log( - `warning: setting timezone '${opts.tz}' fails on server.\n look at https://mariadb.com/kb/en/mysql_tzinfo_to_sql/ to load IANA timezone.\nSetting timezone can be disabled with option \`skipSetTimezone\`` - ); - return Promise.resolve(); - }); + .then((res) => { + return Promise.resolve(); + }) + .catch((err) => { + console.log( + `warning: setting timezone '${opts.tz}' fails on server.\n look at https://mariadb.com/kb/en/mysql_tzinfo_to_sql/ to load IANA timezone.\nSetting timezone can be disabled with option \`skipSetTimezone\`` + ); + return Promise.resolve(); + }); } return Promise.resolve(); }; diff --git a/package.json b/package.json index d6835243..0ed420c7 100644 --- a/package.json +++ b/package.json @@ -46,31 +46,31 @@ "license": "LGPL-2.1-or-later", "dependencies": { "@types/geojson": "^7946.0.7", - "@types/node": "^14.11.2", + "@types/node": "^14.14.7", "denque": "^1.4.1", "iconv-lite": "^0.6.2", "long": "^4.0.0", - "moment-timezone": "^0.5.31", + "moment-timezone": "^0.5.32", "please-upgrade-node": "^3.2.0" }, "devDependencies": { - "@typescript-eslint/eslint-plugin": "^4.3.0", - "@typescript-eslint/parser": "^4.3.0", + "@typescript-eslint/eslint-plugin": "^4.7.0", + "@typescript-eslint/parser": "^4.7.0", "benchmark": "^2.1.4", "chai": "^4.2.0", - "codecov": "^3.8.0", + "codecov": "^3.8.1", "colors": "^1.4.0", "dom-parser": "^0.1.6", "error-stack-parser": "^2.0.6", - "eslint": "^7.11.0", - "eslint-config-prettier": "^6.12.0", + "eslint": "^7.13.0", + "eslint-config-prettier": "^6.15.0", "eslint-plugin-markdown": "^1.0.1", "eslint-plugin-prettier": "^3.1.0", - "mocha": "^8.1.3", + "mocha": "^8.2.1", "mocha-lcov-reporter": "^1.3.0", "nyc": "^15.0.0", "prettier": "^2.1.2", - "typescript": "^4.0.3" + "typescript": "^4.0.5" }, "bugs": { "url": "https://jira.mariadb.org/projects/CONJS/" diff --git a/test/integration/datatype/test-datetime.js b/test/integration/datatype/test-datetime.js index 85250490..0ed2a39d 100644 --- a/test/integration/datatype/test-datetime.js +++ b/test/integration/datatype/test-datetime.js @@ -110,28 +110,27 @@ describe('datetime', () => { .catch(done); }); - it('date text America/New_York timezone', function (done) { - const date = new Date('1999-01-31 12:13:14'); - if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6)) this.skip(); - base - 
.createConnection({ timezone: 'America/New_York'}) - .then((conn) => { - conn - .query({ sql: 'select CAST(? as datetime) d' }, [date]) - .then((res) => { - assert.equal(Object.prototype.toString.call(res[0].d), '[object Date]'); - assert.equal(res[0].d.getDate(), date.getDate()); - assert.equal(res[0].d.getHours(), date.getHours()); - assert.equal(res[0].d.getMinutes(), date.getMinutes()); - assert.equal(res[0].d.getSeconds(), date.getSeconds()); - conn.close(); - done(); - }) - .catch(done); - }) - .catch(done); - }); - + it('date text America/New_York timezone', function (done) { + const date = new Date('1999-01-31 12:13:14'); + if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6)) this.skip(); + base + .createConnection({ timezone: 'America/New_York' }) + .then((conn) => { + conn + .query({ sql: 'select CAST(? as datetime) d' }, [date]) + .then((res) => { + assert.equal(Object.prototype.toString.call(res[0].d), '[object Date]'); + assert.equal(res[0].d.getDate(), date.getDate()); + assert.equal(res[0].d.getHours(), date.getHours()); + assert.equal(res[0].d.getMinutes(), date.getMinutes()); + assert.equal(res[0].d.getSeconds(), date.getSeconds()); + conn.close(); + done(); + }) + .catch(done); + }) + .catch(done); + }); it('date text from row', function (done) { if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6)) this.skip(); From e41b0ee3ac216aeb3450fba03a9743e8fc1e68f4 Mon Sep 17 00:00:00 2001 From: rusher Date: Thu, 19 Nov 2020 11:35:46 +0100 Subject: [PATCH 08/21] [misc] travis SkySQL HA testing addition --- .travis.yml | 2 ++ .travis/script.sh | 34 ++++++++++++++++++++++------------ 2 files changed, 24 insertions(+), 12 deletions(-) diff --git a/.travis.yml b/.travis.yml index 059ca8ce..fcbbf93e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -46,6 +46,8 @@ matrix: env: DB=build SKIP_LEAK=1 - node_js: "12" env: SKYSQL=true SKIP_LEAK=1 + - node_js: "12" + env: SKYSQL_HA=true SKIP_LEAK=1 - node_js: "10" env: DB=mariadb:10.5 - node_js: "12" diff --git a/.travis/script.sh b/.travis/script.sh index e4082f28..f045ffcc 100644 --- a/.travis/script.sh +++ b/.travis/script.sh @@ -6,19 +6,29 @@ set -e ################################################################################################################### # test different type of configuration ################################################################################################################### - -if [ -n "$SKYSQL" ] ; then - - if [ -z "$SKYSQL_HOST" ] ; then - echo "No SkySQL configuration found !" - exit 0 +if [ -n "$SKYSQL" ] || [ -n "$SKYSQL_HA" ]; then + if [ -n "$SKYSQL" ] ; then + if [ -z "$SKYSQL_HOST" ] ; then + echo "No SkySQL configuration found !" + exit 0 + else + export TEST_USER=$SKYSQL_USER + export TEST_HOST=$SKYSQL_HOST + export TEST_PASSWORD=$SKYSQL_PASSWORD + export TEST_PORT=$SKYSQL_PORT + export TEST_SSL_CA=$SKYSQL_SSL_CA + fi else - export TEST_USER=$SKYSQL_USER - export TEST_HOST=$SKYSQL_HOST - export TEST_PASSWORD=$SKYSQL_PASSWORD - export TEST_PORT=$SKYSQL_PORT - export TEST_SSL_CA=$SKYSQL_SSL_CA - export TEST_BULK=false + if [ -z "$SKYSQL_HA_HOST" ] ; then + echo "No SkySQL configuration found !" 
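+      # nothing to test without SkySQL HA credentials; exit successfully so the build is not marked as failed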
+ exit 0 + else + export TEST_USER=$SKYSQL_HA_USER + export TEST_HOST=$SKYSQL_HA_HOST + export TEST_PASSWORD=$SKYSQL_HA_PASSWORD + export TEST_PORT=$SKYSQL_HA_PORT + export TEST_SSL_CA=$SKYSQL_HA_SSL_CA + fi fi else From 9f84323c8e718e5111a80220f30bc1f2656c6e07 Mon Sep 17 00:00:00 2001 From: rusher Date: Tue, 24 Nov 2020 12:20:17 +0100 Subject: [PATCH 09/21] [misc] travis SkySQL HA testing addition --- .travis.yml | 3 +- test/integration/test-auth-plugin.js | 3 +- test/integration/test-batch-callback.js | 425 ++-- test/integration/test-batch-geometry-type.js | 253 +- test/integration/test-batch.js | 2393 ++++++++---------- test/integration/test-big-query.js | 108 +- test/integration/test-call.js | 2 +- test/integration/test-change-user.js | 3 +- test/integration/test-cluster.js | 64 +- test/integration/test-compression.js | 50 +- test/integration/test-connection-opts.js | 44 +- test/integration/test-connection.js | 20 +- test/integration/test-debug.js | 19 +- test/integration/test-error.js | 6 +- test/integration/test-multi-results.js | 14 +- test/integration/test-ok-packet.js | 2 +- test/integration/test-pool-callback-event.js | 4 +- test/integration/test-pool-callback.js | 21 +- test/integration/test-pool-event.js | 2 +- test/integration/test-pool.js | 58 +- test/integration/test-ssl.js | 5 +- 21 files changed, 1590 insertions(+), 1909 deletions(-) diff --git a/.travis.yml b/.travis.yml index fcbbf93e..9d4e6029 100644 --- a/.travis.yml +++ b/.travis.yml @@ -19,6 +19,7 @@ install: # generate SSL certificates - mkdir tmp - chmod +x .travis/gen-ssl.sh + - chmod +x .travis/sql/* - chmod +x .travis/build/build.sh - chmod +x .travis/build/docker-entrypoint.sh - chmod 777 .travis/build/ @@ -47,7 +48,7 @@ matrix: - node_js: "12" env: SKYSQL=true SKIP_LEAK=1 - node_js: "12" - env: SKYSQL_HA=true SKIP_LEAK=1 + env: SKYSQL_HA=true MAXSCALE_TEST_DISABLE=true SKIP_LEAK=1 - node_js: "10" env: DB=mariadb:10.5 - node_js: "12" diff --git a/test/integration/test-auth-plugin.js b/test/integration/test-auth-plugin.js index 8d77f9f7..07097c11 100644 --- a/test/integration/test-auth-plugin.js +++ b/test/integration/test-auth-plugin.js @@ -280,7 +280,8 @@ describe('authentication plugin', () => { }); it('multi authentication plugin', function (done) { - if (process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL) this.skip(); + if (process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL || process.env.SKYSQL_HA) + this.skip(); if (!shareConn.info.isMariaDB() || !shareConn.info.hasMinVersion(10, 4, 3)) this.skip(); shareConn.query("drop user IF EXISTS mysqltest1@'%'").catch((err) => {}); shareConn diff --git a/test/integration/test-batch-callback.js b/test/integration/test-batch-callback.js index a2698d27..24b4b3dd 100644 --- a/test/integration/test-batch-callback.js +++ b/test/integration/test-batch-callback.js @@ -84,8 +84,11 @@ describe('batch callback', () => { conn.query('DROP TABLE IF EXISTS simpleBatch'); conn.query( - 'CREATE TABLE simpleBatch(id int, id2 boolean, id3 int, t varchar(128), d datetime, d2 datetime(6), g POINT, id4 int) CHARSET utf8mb4' + 'CREATE TABLE simpleBatch(' + + 'id int, id2 boolean, id3 int, t varchar(128), d datetime, d2 datetime(6), g POINT, id4 int) ' + + 'CHARSET utf8mb4' ); + conn.query('FLUSH TABLES'); const f = {}; f.toSqlString = () => { return 'blabla'; @@ -223,6 +226,7 @@ describe('batch callback', () => { conn.query('DROP TABLE IF EXISTS simpleBatchWithOptions'); conn.query('CREATE TABLE simpleBatchWithOptions(id int, d datetime)'); + conn.query('FLUSH TABLES'); const f 
= {}; f.toSqlString = () => { return 'blabla'; @@ -291,6 +295,7 @@ describe('batch callback', () => { conn.query('DROP TABLE IF EXISTS simpleBatchCP1251'); conn.query('CREATE TABLE simpleBatchCP1251(t varchar(128), id int) CHARSET utf8mb4'); + conn.query('FLUSH TABLES'); conn.batch( 'INSERT INTO `simpleBatchCP1251` values (?, ?)', [ @@ -384,75 +389,78 @@ describe('batch callback', () => { conn.query( 'CREATE TABLE simpleBatch(id int, id2 boolean, id3 int, t varchar(8), d datetime, d2 datetime(6), g POINT, id4 int) CHARSET utf8mb4' ); - conn.batch( - 'INSERT INTO `simpleBatch` values (1, ?, 2, ?, ?, ?, ?, 3)', - [ - [ - true, - 'john', - new Date('2001-12-31 23:59:58'), - new Date('2018-01-01 12:30:20.456789'), - { - type: 'Point', - coordinates: [10, 10] - } - ], - [ - false, - '12345678901', - null, - new Date('2018-01-21 11:30:20.123456'), - { - type: 'Point', - coordinates: [10, 20] - } - ], - [ - 0, - null, - new Date('2020-12-31 23:59:59'), - new Date('2018-01-21 11:30:20.123456'), - { - type: 'Point', - coordinates: [20, 20] - } - ] - ], - (err, res) => { - if (err) { - assert.isTrue( - err.message.includes("Data too long for column 't' at row 2"), - err.message - ); - conn.query('DROP TABLE simpleBatch', (err, res) => { - clearTimeout(timeout); - conn.end(() => { - done(); - }); - }); - } else { - conn.end(() => { - if ( - (shareConn.info.isMariaDB() && shareConn.info.hasMinVersion(10, 2, 0)) || - (!shareConn.info.isMariaDB() && shareConn.info.hasMinVersion(5, 7, 0)) - ) { - //field truncated must have thrown error - done(new Error('must have throw error !')); + conn.query('FLUSH TABLES', (err) => { + conn.batch( + 'INSERT INTO `simpleBatch` values (1, ?, 2, ?, ?, ?, ?, 3)', + [ + [ + true, + 'john', + new Date('2001-12-31 23:59:58'), + new Date('2018-01-01 12:30:20.456789'), + { + type: 'Point', + coordinates: [10, 10] + } + ], + [ + false, + '12345678901', + null, + new Date('2018-01-21 11:30:20.123456'), + { + type: 'Point', + coordinates: [10, 20] + } + ], + [ + 0, + null, + new Date('2020-12-31 23:59:59'), + new Date('2018-01-21 11:30:20.123456'), + { + type: 'Point', + coordinates: [20, 20] + } + ] + ], + (err, res) => { + if (err) { + assert.isTrue( + err.message.includes("Data too long for column 't' at row 2"), + err.message + ); + conn.query('DROP TABLE simpleBatch', (err, res) => { + clearTimeout(timeout); + conn.end(() => { + done(); + }); + }); } else { - done(); + conn.end(() => { + if ( + (shareConn.info.isMariaDB() && shareConn.info.hasMinVersion(10, 2, 0)) || + (!shareConn.info.isMariaDB() && shareConn.info.hasMinVersion(5, 7, 0)) + ) { + //field truncated must have thrown error + done(new Error('must have throw error !')); + } else { + done(); + } + }); } + } + ); + conn.query('select 1', (err, rows) => { + if (err) { + return conn.end(() => { + done(err); }); } - } - ); - conn.query('select 1', (err, rows) => { - if (err) { - return conn.end(() => { - done(err); - }); - } - assert.deepEqual(rows, [{ 1: 1 }]); + assert.deepEqual(rows, [{ 1: 1 }]); + }); }); + }); }; @@ -569,51 +577,54 @@ describe('batch callback', () => { conn.query( 'CREATE TABLE batchWithStream(id int, id2 int, id3 int, t varchar(128), id4 int, id5 int) CHARSET utf8mb4' ); - conn.batch( - 'INSERT INTO `batchWithStream` values (1, ?, 2, ?, ?, 3)', - [ - [1, stream1, 99], - [2, stream2, 98] - ], - (err, res) => { - if (err) { - return conn.end(() => { - done(err); - }); - } - assert.equal(res.affectedRows, 2); - conn.query('select * from `batchWithStream`', (err, res) => { - if (err) { - return 
conn.end(() => { - done(err); + conn.query('FLUSH TABLES', err => { + conn.batch( + 'INSERT INTO `batchWithStream` values (1, ?, 2, ?, ?, 3)', + [ + [1, stream1, 99], + [2, stream2, 98] + ], + (err, res) => { + if (err) { + return conn.end(() => { + done(err); + }); + } + assert.equal(res.affectedRows, 2); + conn.query('select * from `batchWithStream`', (err, res) => { + if (err) { + return conn.end(() => { + done(err); + }); + } + assert.deepEqual(res, [ + { + id: 1, + id2: 1, + id3: 2, + t: 'abcdefghijkflmnopqrtuvwxyz🤘💪', + id4: 99, + id5: 3 + }, + { + id: 1, + id2: 2, + id3: 2, + t: 'abcdefghijkflmnopqrtuvwxyz🤘💪', + id4: 98, + id5: 3 + } + ]); + conn.query('DROP TABLE batchWithStream'); + clearTimeout(timeout); + conn.end(() => { + done(); + }); }); } - assert.deepEqual(res, [ - { - id: 1, - id2: 1, - id3: 2, - t: 'abcdefghijkflmnopqrtuvwxyz🤘💪', - id4: 99, - id5: 3 - }, - { - id: 1, - id2: 2, - id3: 2, - t: 'abcdefghijkflmnopqrtuvwxyz🤘💪', - id4: 98, - id5: 3 - } - ]); - conn.query('DROP TABLE batchWithStream'); - clearTimeout(timeout); - conn.end(() => { - done(); - }); - }); - } - ); + ); + }); + }); }; @@ -676,50 +687,52 @@ describe('batch callback', () => { conn.query( 'CREATE TABLE simpleNamedPlaceHolders(id int, id2 int, id3 int, t varchar(128), id4 int) CHARSET utf8mb4' ); - conn.batch( - 'INSERT INTO `simpleNamedPlaceHolders` values (1, :param_1, 2, :param_2, 3)', - [ - { param_1: 1, param_2: 'john' }, - { param_1: 2, param_2: 'jack' } - ], - (err, res) => { - if (err) { - return conn.end(() => { - done(err); - }); - } - assert.equal(res.affectedRows, 2); - conn.query('select * from `simpleNamedPlaceHolders`', (err, res) => { + conn.query('FLUSH TABLES', err => { + conn.batch( + 'INSERT INTO `simpleNamedPlaceHolders` values (1, :param_1, 2, :param_2, 3)', + [ + { param_1: 1, param_2: 'john' }, + { param_1: 2, param_2: 'jack' } + ], + (err, res) => { if (err) { return conn.end(() => { done(err); }); } - assert.deepEqual(res, [ - { - id: 1, - id2: 1, - id3: 2, - t: 'john', - id4: 3 - }, - { - id: 1, - id2: 2, - id3: 2, - t: 'jack', - id4: 3 + assert.equal(res.affectedRows, 2); + conn.query('select * from `simpleNamedPlaceHolders`', (err, res) => { + if (err) { + return conn.end(() => { + done(err); + }); } - ]); - conn.query('DROP TABLE simpleNamedPlaceHolders', () => { - clearTimeout(timeout); - return conn.end(() => { - done(); + assert.deepEqual(res, [ + { + id: 1, + id2: 1, + id3: 2, + t: 'john', + id4: 3 + }, + { + id: 1, + id2: 2, + id3: 2, + t: 'jack', + id4: 3 + } + ]); + conn.query('DROP TABLE simpleNamedPlaceHolders', () => { + clearTimeout(timeout); + return conn.end(() => { + done(); + }); }); }); - }); - } - ); + } + ); + }); }); }; @@ -841,49 +854,51 @@ describe('batch callback', () => { conn.query( 'CREATE TABLE streamNamedPlaceHolders(id int, id2 int, id3 int, t varchar(128), id4 int, id5 int) CHARSET utf8mb4' ); - conn.batch( - 'INSERT INTO `streamNamedPlaceHolders` values (1, :id1, 2, :id3, :id7, 3)', - [ - { id1: 1, id3: stream1, id4: 99, id5: 6 }, - { id1: 2, id3: stream2, id4: 98 } - ], - (err, res) => { - if (err) { - conn.end(); - return done(err); - } - assert.equal(res.affectedRows, 2); - conn.query('select * from `streamNamedPlaceHolders`', (err, res) => { + conn.query('FLUSH TABLES', err => { + conn.batch( + 'INSERT INTO `streamNamedPlaceHolders` values (1, :id1, 2, :id3, :id7, 3)', + [ + { id1: 1, id3: stream1, id4: 99, id5: 6 }, + { id1: 2, id3: stream2, id4: 98 } + ], + (err, res) => { if (err) { conn.end(); return done(err); } - assert.deepEqual(res, 
[ - { - id: 1, - id2: 1, - id3: 2, - t: 'abcdefghijkflmnopqrtuvwxyz🤘💪', - id4: null, - id5: 3 - }, - { - id: 1, - id2: 2, - id3: 2, - t: 'abcdefghijkflmnopqrtuvwxyz🤘💪', - id4: null, - id5: 3 + assert.equal(res.affectedRows, 2); + conn.query('select * from `streamNamedPlaceHolders`', (err, res) => { + if (err) { + conn.end(); + return done(err); } - ]); - conn.query('DROP TABLE streamNamedPlaceHolders'); - clearTimeout(timeout); - conn.end(() => { - done(); + assert.deepEqual(res, [ + { + id: 1, + id2: 1, + id3: 2, + t: 'abcdefghijkflmnopqrtuvwxyz🤘💪', + id4: null, + id5: 3 + }, + { + id: 1, + id2: 2, + id3: 2, + t: 'abcdefghijkflmnopqrtuvwxyz🤘💪', + id4: null, + id5: 3 + } + ]); + conn.query('DROP TABLE streamNamedPlaceHolders'); + clearTimeout(timeout); + conn.end(() => { + done(); + }); }); - }); - } - ); + } + ); + }); }); }; @@ -932,7 +947,7 @@ describe('batch callback', () => { describe('standard question mark using bulk', () => { const useCompression = false; it('simple batch, local date', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!base.utf8Collation()) this.skip(); this.timeout(30000); if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); @@ -940,14 +955,14 @@ describe('batch callback', () => { }); it('simple batch with option', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); this.timeout(30000); if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); simpleBatchWithOptions(useCompression, true, done); }); it('batch without parameter', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); base.createConnection({ compress: useCompression, bulk: true }).then((conn) => { conn @@ -965,7 +980,7 @@ describe('batch callback', () => { }); it('batch with erroneous parameter', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); base.createConnection({ compress: useCompression, bulk: true }).then((conn) => { conn @@ -988,7 +1003,7 @@ describe('batch callback', () => { }); it('simple batch offset date', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!base.utf8Collation()) this.skip(); this.timeout(30000); if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); @@ -996,46 +1011,46 @@ describe('batch callback', () => { }); it('simple batch encoding CP1251', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); this.timeout(30000); simpleBatchEncodingCP1251(useCompression, true, 'local', done); }); it('simple batch error message ', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); this.timeout(30000); simpleBatchErrorMsg(useCompression, true, done); }); it('simple batch error message packet split', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); this.timeout(30000); if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); 
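      // skipped on MySQL < 5.6: the test table uses datetime(6) columns, and
      // fractional-second datetime support only arrived in MySQL 5.6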
simpleBatchErrorSplit(useCompression, true, 'local', done); }); it('non rewritable batch', function (done) { - if (process.env.SKYSQL || !supportBulk) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA || !supportBulk) this.skip(); this.timeout(30000); nonRewritableBatch(useCompression, true, done); }); it('16M+ error batch', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (maxAllowedSize <= testSize) this.skip(); this.timeout(360000); bigBatchError(useCompression, true, done); }); it('batch with streams', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!base.utf8Collation()) this.skip(); this.timeout(30000); batchWithStream(useCompression, true, done); }); it('batch error with streams', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); this.timeout(30000); batchErrorWithStream(useCompression, true, done); }); @@ -1045,7 +1060,7 @@ describe('batch callback', () => { const useCompression = true; it('simple batch, local date', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!base.utf8Collation()) this.skip(); this.timeout(30000); if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); @@ -1053,7 +1068,7 @@ describe('batch callback', () => { }); it('simple batch offset date', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!base.utf8Collation()) this.skip(); this.timeout(30000); if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); @@ -1061,33 +1076,33 @@ describe('batch callback', () => { }); it('simple batch error message ', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); this.timeout(30000); simpleBatchErrorMsg(useCompression, true, done); }); it('non rewritable batch', function (done) { - if (process.env.SKYSQL || !supportBulk) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA || !supportBulk) this.skip(); this.timeout(30000); nonRewritableBatch(useCompression, true, done); }); it('16M+ error batch', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (maxAllowedSize <= testSize) this.skip(); this.timeout(360000); bigBatchError(useCompression, true, done); }); it('batch with streams', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!base.utf8Collation()) this.skip(); this.timeout(30000); batchWithStream(useCompression, true, done); }); it('batch error with streams', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); this.timeout(30000); batchErrorWithStream(useCompression, true, done); }); @@ -1251,32 +1266,32 @@ describe('batch callback', () => { describe('named parameter with bulk', () => { it('simple batch', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); this.timeout(30000); simpleNamedPlaceHolders(true, done); }); it('simple batch error', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); 
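      // exercises the bulk-protocol code path, so it is skipped when the server
      // lacks MARIADB_CLIENT_STMT_BULK_OPERATIONS support (see supportBulk above)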
this.timeout(30000); simpleNamedPlaceHoldersErr(true, done); }); it('non rewritable batch', function (done) { - if (process.env.SKYSQL || !supportBulk) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA || !supportBulk) this.skip(); this.timeout(30000); nonRewritableHoldersErr(true, done); }); it('batch with streams', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!base.utf8Collation()) this.skip(); this.timeout(30000); streamNamedPlaceHolders(true, done); }); it('batch error with streams', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); this.timeout(30000); streamErrorNamedPlaceHolders(true, done); }); diff --git a/test/integration/test-batch-geometry-type.js b/test/integration/test-batch-geometry-type.js index 7b76dc73..50a31289 100644 --- a/test/integration/test-batch-geometry-type.js +++ b/test/integration/test-batch-geometry-type.js @@ -20,6 +20,9 @@ describe('batch geometry type', () => { .then(() => { return shareConn.query('CREATE TABLE gis_point_batch (g POINT)'); }) + .then(() => { + return shareConn.query('FLUSH TABLES'); + }) .then(() => { return shareConn.batch('INSERT INTO gis_point_batch VALUES (?)', [ [ @@ -117,6 +120,9 @@ describe('batch geometry type', () => { .then(() => { return shareConn.query('CREATE TABLE gis_line_batch (g LINESTRING)'); }) + .then(() => { + return shareConn.query('FLUSH TABLES'); + }) .then(() => { return shareConn.batch('INSERT INTO gis_line_batch VALUES (?)', [ [ @@ -225,6 +231,10 @@ describe('batch geometry type', () => { .then(() => { return shareConn.query('CREATE TABLE gis_polygon_batch (g POLYGON)'); }) + .then(() => { + return shareConn.query('FLUSH TABLES'); + }) + .then(() => { return shareConn.batch('INSERT INTO gis_polygon_batch VALUES (?)', [ [ @@ -411,6 +421,10 @@ describe('batch geometry type', () => { .then(() => { return shareConn.query('CREATE TABLE gis_multi_point_batch (g MULTIPOINT)'); }) + .then(() => { + return shareConn.query('FLUSH TABLES'); + }) + .then(() => { return shareConn.batch('INSERT INTO gis_multi_point_batch VALUES (?)', [ [ @@ -512,6 +526,10 @@ describe('batch geometry type', () => { .then(() => { return shareConn.query('CREATE TABLE gis_multi_line_batch (g MULTILINESTRING)'); }) + .then(() => { + return shareConn.query('FLUSH TABLES'); + }) + .then(() => { return shareConn.batch('INSERT INTO gis_multi_line_batch VALUES (?)', [ [ @@ -669,6 +687,10 @@ describe('batch geometry type', () => { .then(() => { return shareConn.query('CREATE TABLE gis_multi_polygon_batch (g MULTIPOLYGON)'); }) + .then(() => { + return shareConn.query('FLUSH TABLES'); + }) + .then(() => { return shareConn.batch('INSERT INTO gis_multi_polygon_batch VALUES (?)', [ [ @@ -929,19 +951,14 @@ describe('batch geometry type', () => { .catch(done); }); - it('Geometry collection insert', function (done) { + it('Geometry collection insert', async function () { if (!shareConn.info.isMariaDB()) this.skip(); - base - .createConnection() - .then((conn) => { - conn - .query('DROP TABLE IF EXISTS gis_geometrycollection_batch') - .then(() => { - return conn.query('CREATE TABLE gis_geometrycollection_batch (g GEOMETRYCOLLECTION)'); - }) - .then(() => { - return conn.batch('INSERT INTO gis_geometrycollection_batch VALUES (?)', [ + const conn = await base.createConnection(); + conn.query('DROP TABLE IF EXISTS gis_geometrycollection_batch'); + conn.query('CREATE TABLE gis_geometrycollection_batch (g 
GEOMETRYCOLLECTION)'); + await shareConn.query('FLUSH TABLES'); + await conn.batch('INSERT INTO gis_geometrycollection_batch VALUES (?)', [ [ { type: 'GeometryCollection', @@ -1044,123 +1061,111 @@ describe('batch geometry type', () => { } ] ]); - }) - .then(() => { - return conn.query('SELECT * FROM gis_geometrycollection_batch'); - }) - .then((rows) => { - assert.deepEqual(rows, [ - { - g: { - type: 'GeometryCollection', - geometries: [ - { - type: 'Point', - coordinates: [10, 10] - }, - { - type: 'LineString', - coordinates: [ - [0, 0], - [0, 10], - [10, 0] - ] - }, - { - type: 'MultiPoint', - coordinates: [ - [0, 0], - [10, 10], - [10, 20], - [20, 20] - ] - }, - { - type: 'MultiLineString', - coordinates: [ - [ - [10, 48], - [10, 21], - [10, 0] - ], - [ - [16, 0], - [16, 23], - [16, 48] - ] + const rows = await conn.query('SELECT * FROM gis_geometrycollection_batch'); + assert.deepEqual(rows, [ + { + g: { + type: 'GeometryCollection', + geometries: [ + { + type: 'Point', + coordinates: [10, 10] + }, + { + type: 'LineString', + coordinates: [ + [0, 0], + [0, 10], + [10, 0] + ] + }, + { + type: 'MultiPoint', + coordinates: [ + [0, 0], + [10, 10], + [10, 20], + [20, 20] + ] + }, + { + type: 'MultiLineString', + coordinates: [ + [ + [10, 48], + [10, 21], + [10, 0] + ], + [ + [16, 0], + [16, 23], + [16, 48] + ] + ] + }, + { + type: 'MultiPolygon', + coordinates: [ + [ + [ + [28, 26], + [28, 0], + [84, 0], + [84, 42], + [28, 26] + ], + [ + [52, 18], + [66, 23], + [73, 9], + [48, 6], + [52, 18] ] - }, - { - type: 'MultiPolygon', - coordinates: [ - [ - [ - [28, 26], - [28, 0], - [84, 0], - [84, 42], - [28, 26] - ], - [ - [52, 18], - [66, 23], - [73, 9], - [48, 6], - [52, 18] - ] - ], - [ - [ - [59, 18], - [67, 18], - [67, 13], - [59, 13], - [59, 18] - ] - ] + ], + [ + [ + [59, 18], + [67, 18], + [67, 13], + [59, 13], + [59, 18] ] - } - ] - } - }, - { - g: { - type: 'GeometryCollection', - geometries: [ - { - type: 'Point', - coordinates: [10, 20] - } + ] ] } - }, - { - g: { - type: 'GeometryCollection', - geometries: [] - } - }, - { - g: { - type: 'GeometryCollection', - geometries: [] - } - }, - { - g: { - type: 'GeometryCollection', - geometries: [] + ] + } + }, + { + g: { + type: 'GeometryCollection', + geometries: [ + { + type: 'Point', + coordinates: [10, 20] } - } - ]); - conn.end(); - done(); - }) - .catch((err) => { - conn.end(); - done(err); - }); - }) - .catch(done); + ] + } + }, + { + g: { + type: 'GeometryCollection', + geometries: [] + } + }, + { + g: { + type: 'GeometryCollection', + geometries: [] + } + }, + { + g: { + type: 'GeometryCollection', + geometries: [] + } + } + ]); + conn.end(); }); }); diff --git a/test/integration/test-batch.js b/test/integration/test-batch.js index 5a1ad85f..afbf83ba 100644 --- a/test/integration/test-batch.js +++ b/test/integration/test-batch.js @@ -17,41 +17,26 @@ describe('batch', () => { let maxAllowedSize, bigBuf, timezoneParam; let supportBulk; - before(function (done) { + before(async function () { timezoneParam = 'America/New_York'; supportBulk = (Conf.baseConfig.bulk === undefined ? true : Conf.baseConfig.bulk) ? 
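      // capability bit advertised by MariaDB servers that implement COM_STMT_BULK_EXECUTE (the bulk batch protocol)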
(shareConn.info.serverCapabilities & Capabilities.MARIADB_CLIENT_STMT_BULK_OPERATIONS) > 0 : false; - shareConn - .query('SELECT @@max_allowed_packet as t') - .then((row) => { - maxAllowedSize = row[0].t; - if (testSize < maxAllowedSize) { - bigBuf = Buffer.alloc(testSize); - for (let i = 0; i < testSize; i++) { - bigBuf[i] = 97 + (i % 10); - } - } - const buf = Buffer.from(str); - fs.writeFile(fileName, buf, 'utf8', function (err) { - if (err) { - done(err); - } else { - done(); - } - }); - }) - .catch(done); + const row = await shareConn.query('SELECT @@max_allowed_packet as t') + maxAllowedSize = row[0].t; + if (testSize < maxAllowedSize) { + bigBuf = Buffer.alloc(testSize); + for (let i = 0; i < testSize; i++) { + bigBuf[i] = 97 + (i % 10); + } + } + const buf = Buffer.from(str); + fs.writeFileSync(fileName, buf, 'utf8'); }); - beforeEach(function (done) { + beforeEach(async function () { //just to ensure shared connection is not closed by server due to inactivity - shareConn - .ping() - .then(() => { - done(); - }) - .catch(done); + await shareConn.ping() }); after(function () { @@ -60,31 +45,27 @@ describe('batch', () => { }); }); - const simpleBatch = (useCompression, useBulk, timezone, done) => { - base - .createConnection({ + const simpleBatch = async (useCompression, useBulk, timezone) => { + const conn = await base.createConnection({ compress: useCompression, bulk: useBulk, timezone: timezone - }) - .then((conn) => { - const timeout = setTimeout(() => { - console.log(conn.info.getLastPackets()); - }, 25000); - - conn - .query('DROP TABLE IF EXISTS simpleBatch') - .then(() => { - return conn.query( - 'CREATE TABLE simpleBatch(id int, id2 boolean, id3 int, t varchar(128), d datetime, d2 datetime(6), g POINT, id4 int) CHARSET utf8mb4' - ); - }) - .then(() => { - const f = {}; - f.toSqlString = () => { - return 'blabla'; - }; - return conn.batch('INSERT INTO `simpleBatch` values (1, ?, 2, ?, ?, ?, ?, 3)', [ + }); + const timeout = setTimeout(() => { + console.log(conn.info.getLastPackets()); + }, 25000); + + conn.query('DROP TABLE IF EXISTS simpleBatch'); + conn.query( + 'CREATE TABLE simpleBatch(id int, id2 boolean, id3 int, t varchar(128), d datetime, d2 datetime(6), g POINT, id4 int) CHARSET utf8mb4' + ); + await shareConn.query('FLUSH TABLES'); + + const f = {}; + f.toSqlString = () => { + return 'blabla'; + }; + let res = await conn.batch('INSERT INTO `simpleBatch` values (1, ?, 2, ?, ?, ?, ?, 3)', [ [ true, 'Ʉjo"h\u000An😎🌶\\\\', @@ -126,433 +107,320 @@ describe('batch', () => { } ] ]); - }) - .then((res) => { - assert.equal(res.affectedRows, 4); - return conn.query('select * from `simpleBatch`'); - }) - .then((res) => { - assert.deepEqual(res, [ - { - id: 1, - id2: 1, - id3: 2, - t: 'Ʉjo"h\u000An😎🌶\\\\', - d: new Date('2001-12-31 23:59:58+3'), - d2: new Date('2018-01-01 12:30:20.456789+3'), - g: { - type: 'Point', - coordinates: [10, 10] - }, - id4: 3 - }, - { - id: 1, - id2: 1, - id3: 2, - t: 'blabla', - d: new Date('2001-12-31 23:59:58+3'), - d2: new Date('2018-01-01 12:30:20.456789+3'), - g: { - type: 'Point', - coordinates: [10, 10] - }, - id4: 3 - }, - { - id: 1, - id2: 0, - id3: 2, - t: '{"name":"jack\\nमस्","val":"tt"}', - d: null, - d2: new Date('2018-01-21 11:30:20.123456+3'), - g: { - type: 'Point', - coordinates: [10, 20] - }, - id4: 3 - }, - { - id: 1, - id2: 0, - id3: 2, - t: null, - d: new Date('2020-12-31 23:59:59+3'), - d2: new Date('2018-01-21 11:30:20.123456+3'), - g: { - type: 'Point', - coordinates: [20, 20] - }, - id4: 3 - } - ]); - return conn.query('DROP 
TABLE simpleBatch'); - }) - .then((res) => { - clearTimeout(timeout); - conn.end(); - done(); - }) - .catch(done); + assert.equal(res.affectedRows, 4); + res = await conn.query('select * from `simpleBatch`'); + assert.deepEqual(res, [ + { + id: 1, + id2: 1, + id3: 2, + t: 'Ʉjo"h\u000An😎🌶\\\\', + d: new Date('2001-12-31 23:59:58+3'), + d2: new Date('2018-01-01 12:30:20.456789+3'), + g: { + type: 'Point', + coordinates: [10, 10] + }, + id4: 3 + }, + { + id: 1, + id2: 1, + id3: 2, + t: 'blabla', + d: new Date('2001-12-31 23:59:58+3'), + d2: new Date('2018-01-01 12:30:20.456789+3'), + g: { + type: 'Point', + coordinates: [10, 10] + }, + id4: 3 + }, + { + id: 1, + id2: 0, + id3: 2, + t: '{"name":"jack\\nमस्","val":"tt"}', + d: null, + d2: new Date('2018-01-21 11:30:20.123456+3'), + g: { + type: 'Point', + coordinates: [10, 20] + }, + id4: 3 + }, + { + id: 1, + id2: 0, + id3: 2, + t: null, + d: new Date('2020-12-31 23:59:59+3'), + d2: new Date('2018-01-21 11:30:20.123456+3'), + g: { + type: 'Point', + coordinates: [20, 20] + }, + id4: 3 + } + ]); + conn.query('DROP TABLE simpleBatch'); + clearTimeout(timeout); - return conn - .query('select 1') - .then((rows) => { - assert.deepEqual(rows, [{ 1: 1 }]); - }) - .catch(done); - }) - .catch(done); + const rows = await conn.query('select 1'); + assert.deepEqual(rows, [{ 1: 1 }]); + conn.end(); }; - const simpleBatchWithOptions = (useCompression, useBulk, done) => { - base - .createConnection({ compress: useCompression, bulk: useBulk }) - .then((conn) => { - const timeout = setTimeout(() => { - console.log(conn.info.getLastPackets()); - }, 25000); - - conn - .query('DROP TABLE IF EXISTS simpleBatchWithOptions') - .then(() => { - return conn.query('CREATE TABLE simpleBatchWithOptions(id int, d datetime)'); - }) - .then(() => { - const f = {}; - f.toSqlString = () => { - return 'blabla'; - }; - return conn.batch( - { - sql: 'INSERT INTO `simpleBatchWithOptions` values (?, ?)', - maxAllowedPacket: 1048576 - }, - [ - [1, new Date('2001-12-31 23:59:58')], - [2, new Date('2001-12-31 23:59:58')] - ] - ); - }) - .then((res) => { - assert.equal(res.affectedRows, 2); - return conn.query('select * from `simpleBatchWithOptions`'); - }) - .then((res) => { - assert.deepEqual(res, [ - { - id: 1, - d: new Date('2001-12-31 23:59:58') - }, - { - id: 2, - d: new Date('2001-12-31 23:59:58') - } - ]); - return conn.query('DROP TABLE simpleBatchWithOptions'); - }) - .then((res) => { - clearTimeout(timeout); - conn.end(); - done(); - }) - .catch(done); + const simpleBatchWithOptions = async (useCompression, useBulk) => { + const conn = await base.createConnection({ compress: useCompression, bulk: useBulk }); + const timeout = setTimeout(() => { + console.log(conn.info.getLastPackets()); + }, 25000); - return conn - .query('select 1') - .then((rows) => { - assert.deepEqual(rows, [{ 1: 1 }]); - }) - .catch(done); - }) - .catch(done); + conn.query('DROP TABLE IF EXISTS simpleBatchWithOptions'); + conn.query('CREATE TABLE simpleBatchWithOptions(id int, d datetime)'); + await shareConn.query('FLUSH TABLES'); + + const f = {}; + f.toSqlString = () => { + return 'blabla'; + }; + let res = await conn.batch( + { + sql: 'INSERT INTO `simpleBatchWithOptions` values (?, ?)', + maxAllowedPacket: 1048576 + }, + [ + [1, new Date('2001-12-31 23:59:58')], + [2, new Date('2001-12-31 23:59:58')] + ] + ); + assert.equal(res.affectedRows, 2); + res = await conn.query('select * from `simpleBatchWithOptions`'); + assert.deepEqual(res, [ + { + id: 1, + d: new Date('2001-12-31 23:59:58') + }, + { + 
id: 2, + d: new Date('2001-12-31 23:59:58') + } + ]); + conn.query('DROP TABLE simpleBatchWithOptions'); + clearTimeout(timeout); + conn.end(); }; - const simpleBatchEncodingCP1251 = (useCompression, useBulk, timezone, done) => { - base + const simpleBatchEncodingCP1251 = async (useCompression, useBulk, timezone) => { + const conn = await base .createConnection({ compress: useCompression, bulk: useBulk, collation: 'CP1251_GENERAL_CI' - }) - .then((conn) => { - const timeout = setTimeout(() => { - console.log(conn.info.getLastPackets()); - }, 25000); - - conn - .query('DROP TABLE IF EXISTS simpleBatchCP1251') - .then(() => { - return conn.query( - 'CREATE TABLE simpleBatchCP1251(t varchar(128), id int) CHARSET utf8mb4' - ); - }) - .then(() => { - return conn.batch('INSERT INTO `simpleBatchCP1251` values (?, ?)', [ - ['john', 2], - ['©°', 3] - ]); - }) - .then((res) => { - assert.equal(res.affectedRows, 2); - return conn.query('select * from `simpleBatchCP1251`'); - }) - .then((res) => { - assert.deepEqual(res, [ - { id: 2, t: 'john' }, - { id: 3, t: '©°' } - ]); - return conn.query('DROP TABLE simpleBatchCP1251'); - }) - .then((res) => { - clearTimeout(timeout); - conn.end(); - done(); - }) - .catch(done); - - return conn - .query('select 1') - .then((rows) => { - assert.deepEqual(rows, [{ 1: 1 }]); - }) - .catch(done); - }) - .catch(done); + }); + const timeout = setTimeout(() => { + console.log(conn.info.getLastPackets()); + }, 25000); + + conn.query('DROP TABLE IF EXISTS simpleBatchCP1251'); + conn.query('CREATE TABLE simpleBatchCP1251(t varchar(128), id int) CHARSET utf8mb4'); + await shareConn.query('FLUSH TABLES'); + let res = await conn.batch('INSERT INTO `simpleBatchCP1251` values (?, ?)', [ + ['john', 2], + ['©°', 3] + ]); + assert.equal(res.affectedRows, 2); + res = await conn.query('select * from `simpleBatchCP1251`'); + assert.deepEqual(res, [ + { id: 2, t: 'john' }, + { id: 3, t: '©°' } + ]); + conn.query('DROP TABLE simpleBatchCP1251'); + clearTimeout(timeout); + conn.end(); }; - const simpleBatchErrorMsg = (compression, useBulk, done) => { - base - .createConnection({ trace: true, bulk: useBulk }) - .then((conn) => { - const timeout = setTimeout(() => { - console.log(conn.info.getLastPackets()); - }, 25000); - conn - .batch('INSERT INTO simpleBatchErrorMsg values (1, ?, 2, ?, 3)', [ + const simpleBatchErrorMsg = async (compression, useBulk) => { + const conn = await base.createConnection({ trace: true, bulk: useBulk }); + const timeout = setTimeout(() => { + console.log(conn.info.getLastPackets()); + }, 25000); + try { + await conn.batch('INSERT INTO simpleBatchErrorMsg values (1, ?, 2, ?, 3)', [ [1, 'john'], [2, 'jack'] - ]) - .then(() => { - done(new Error('must have thrown error !')); - }) - .catch((err) => { - assert.isTrue(err != null); - assert.isTrue(err.message.includes(" doesn't exist")); - assert.isTrue( - err.message.includes( - "INSERT INTO simpleBatchErrorMsg values (1, ?, 2, ?, 3) - parameters:[[1,'john'],[2,'jack']]" - ) - ); - assert.equal(err.errno, 1146); - assert.equal(err.sqlState, '42S02'); - assert.equal(err.code, 'ER_NO_SUCH_TABLE'); - conn.end(); - clearTimeout(timeout); - done(); - }); - }) - .catch(done); + ]); + throw new Error('must have thrown error !'); + } catch(err) { + assert.isTrue(err != null); + assert.isTrue(err.message.includes(" doesn't exist")); + assert.isTrue( + err.message.includes( + "INSERT INTO simpleBatchErrorMsg values (1, ?, 2, ?, 3) - parameters:[[1,'john'],[2,'jack']]" + ) + ); + assert.equal(err.errno, 1146); + 
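      // errno 1146 / sqlState '42S02' map to ER_NO_SUCH_TABLE: the target table is intentionally never created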
assert.equal(err.sqlState, '42S02'); + assert.equal(err.code, 'ER_NO_SUCH_TABLE'); + conn.end(); + clearTimeout(timeout); + } }; - const noValueBatch = (compression, useBulk, done) => { - base - .createConnection({ trace: true, bulk: useBulk }) - .then((conn) => { - conn.query('DROP TABLE IF EXISTS noValueBatch'); - conn.query('CREATE TABLE noValueBatch(id int not null primary key auto_increment)'); - const timeout = setTimeout(() => { - console.log(conn.info.getLastPackets()); - }, 2000); - conn - .batch('INSERT INTO noValueBatch values ()', []) - .then(() => { - return conn.query('SELECT COUNT(*) as nb FROM noValueBatch'); - }) - .then((res) => { - assert.equal(res[0].nb, 1); - conn.end(); - done(); - }) - .catch((err) => { - conn.end(); - clearTimeout(timeout); - done(err); - }); - }) - .catch(done); + const noValueBatch = async (compression, useBulk) => { + const conn = await base.createConnection({ trace: true, bulk: useBulk }); + await conn.query('DROP TABLE IF EXISTS noValueBatch'); + await conn.query('CREATE TABLE noValueBatch(id int not null primary key auto_increment)'); + const timeout = setTimeout(() => { + console.log(conn.info.getLastPackets()); + }, 2000); + await shareConn.query('FLUSH TABLES'); + + await conn.batch('INSERT INTO noValueBatch values ()', []); + const res = await conn.query('SELECT COUNT(*) as nb FROM noValueBatch'); + assert.equal(res[0].nb, 1); + conn.end(); + clearTimeout(timeout); }; - const simpleBatchErrorSplit = (useCompression, useBulk, timezone, done) => { - base - .createConnection({ + const simpleBatchErrorSplit = async (useCompression, useBulk, timezone) => { + const conn = await base.createConnection({ compress: useCompression, bulk: useBulk, timezone: timezone - }) - .then((conn) => { - const timeout = setTimeout(() => { - console.log(conn.info.getLastPackets()); - }, 25000); - - conn.query('DROP TABLE IF EXISTS simpleBatch'); - conn.query( - 'CREATE TABLE simpleBatch(id int, id2 boolean, id3 int, t varchar(8), d datetime, d2 datetime(6), g POINT, id4 int) CHARSET utf8mb4' - ); - conn - .batch('INSERT INTO `simpleBatch` values (1, ?, 2, ?, ?, ?, ?, 3)', [ - [ - true, - 'john', - new Date('2001-12-31 23:59:58'), - new Date('2018-01-01 12:30:20.456789'), - { - type: 'Point', - coordinates: [10, 10] - } - ], - [ - false, - '12345678901', - null, - new Date('2018-01-21 11:30:20.123456'), - { - type: 'Point', - coordinates: [10, 20] - } - ], - [ - 0, - null, - new Date('2020-12-31 23:59:59'), - new Date('2018-01-21 11:30:20.123456'), - { - type: 'Point', - coordinates: [20, 20] - } - ] - ]) - .then((res) => { - conn.end(); - if ( - (shareConn.info.isMariaDB() && shareConn.info.hasMinVersion(10, 2, 0)) || - (!shareConn.info.isMariaDB() && shareConn.info.hasMinVersion(5, 7, 0)) - ) { - //field truncated must have thrown error - done(new Error('must have throw error !')); - } else { - done(); - } - }) - .catch((err) => { - assert.isTrue( - err.message.includes("Data too long for column 't' at row 2"), - err.message - ); - conn - .query('DROP TABLE simpleBatch') - .then((res) => { - clearTimeout(timeout); - conn.end(); - done(); - }) - .catch(done); - }); - conn - .query('select 1') - .then((rows) => { - assert.deepEqual(rows, [{ 1: 1 }]); - }) - .catch(done); - }) - .catch(done); + }); + const timeout = setTimeout(() => { + console.log(conn.info.getLastPackets()); + }, 25000); + + conn.query('DROP TABLE IF EXISTS simpleBatch'); + conn.query( + 'CREATE TABLE simpleBatch(id int, id2 boolean, id3 int, t varchar(8), d datetime, d2 datetime(6), g POINT, id4 
int) CHARSET utf8mb4'
    );
    await conn.query('FLUSH TABLES');
    try {
      let res = await conn.batch('INSERT INTO `simpleBatch` values (1, ?, 2, ?, ?, ?, ?, 3)', [
        [
          true,
          'john',
          new Date('2001-12-31 23:59:58'),
          new Date('2018-01-01 12:30:20.456789'),
          {
            type: 'Point',
            coordinates: [10, 10]
          }
        ],
        [
          false,
          '12345678901',
          null,
          new Date('2018-01-21 11:30:20.123456'),
          {
            type: 'Point',
            coordinates: [10, 20]
          }
        ],
        [
          0,
          null,
          new Date('2020-12-31 23:59:59'),
          new Date('2018-01-21 11:30:20.123456'),
          {
            type: 'Point',
            coordinates: [20, 20]
          }
        ]
      ]);
      if (
        (shareConn.info.isMariaDB() && shareConn.info.hasMinVersion(10, 2, 0)) ||
        (!shareConn.info.isMariaDB() && shareConn.info.hasMinVersion(5, 7, 0))
      ) {
        //field truncated must have thrown error
        throw new Error('must have thrown error !');
      }
    } catch (err) {
      assert.isTrue(
        err.message.includes("Data too long for column 't' at row 2"),
        err.message
      );
    }
    conn.query('DROP TABLE simpleBatch');
    conn.end();
    clearTimeout(timeout);
  };

  const nonRewritableBatch = async (useCompression, useBulk) => {
    const conn = await base.createConnection({ compress: useCompression, bulk: useBulk });
    const timeout = setTimeout(() => {
      console.log(conn.info.getLastPackets());
    }, 25000);
    try {
      let res = await conn.batch('SELECT ? as id, ? 
as t', [ + [1, 'john'], + [2, 'jack'] + ]); + if (useBulk && conn.info.isMariaDB() && conn.info.hasMinVersion(10, 2, 7)) { + throw new Error('Must have thrown an exception'); + } + assert.deepEqual(res, [ + [ + { + id: 1, + t: 'john' + } + ], + [ + { + id: 2, + t: 'jack' + } + ] + ]); + } catch (err) { + if (useBulk & conn.info.isMariaDB() && conn.info.hasMinVersion(10, 2, 7)) { + assert.isTrue( + err.message.includes( + 'This command is not supported in the prepared statement protocol yet' + ), + err.message + ); + } + } + clearTimeout(timeout); + conn.end(); }; - const bigBatchWith16mMaxAllowedPacket = (useCompression, useBulk, done) => { - base + const bigBatchWith16mMaxAllowedPacket = async (useCompression, useBulk) => { + const conn = await base .createConnection({ compress: useCompression, maxAllowedPacket: 16 * 1024 * 1024, bulk: useBulk, logPackets: true - }) - .then((conn) => { - const timeout = setTimeout(() => { - console.log(conn.info.getLastPackets()); - }, 200000); - conn.query('DROP TABLE IF EXISTS bigBatchWith16mMaxAllowedPacket'); - conn.query( - 'CREATE TABLE bigBatchWith16mMaxAllowedPacket(id int, id2 int, id3 int, t varchar(128), id4 int) CHARSET utf8mb4' - ); - const values = []; - for (let i = 0; i < 1000000; i++) { - values.push([i, str]); - } - conn - .batch('INSERT INTO `bigBatchWith16mMaxAllowedPacket` values (1, ?, 2, ?, 3)', values) - .then((res) => { - assert.equal(res.affectedRows, 1000000); - }) - .catch(done); - let currRow = 0; - conn + }); + const timeout = setTimeout(() => { + console.log(conn.info.getLastPackets()); + }, 200000); + conn.query('DROP TABLE IF EXISTS bigBatchWith16mMaxAllowedPacket'); + conn.query( + 'CREATE TABLE bigBatchWith16mMaxAllowedPacket(id int, id2 int, id3 int, t varchar(128), id4 int) CHARSET utf8mb4' + ); + await conn.query('FLUSH TABLES'); + + const values = []; + for (let i = 0; i < 1000000; i++) { + values.push([i, str]); + } + let res = await conn + .batch('INSERT INTO `bigBatchWith16mMaxAllowedPacket` values (1, ?, 2, ?, 3)', values); + assert.equal(res.affectedRows, 1000000); + let currRow = 0; + return new Promise(function (resolve, reject) { + conn .queryStream('select * from `bigBatchWith16mMaxAllowedPacket`') .on('error', (err) => { - done(new Error('must not have thrown any error !')); + reject(new Error('must not have thrown any error !')); }) .on('data', (row) => { assert.deepEqual(row, { @@ -566,49 +434,41 @@ describe('batch', () => { }) .on('end', () => { assert.equal(1000000, currRow); - conn - .query('DROP TABLE bigBatchWith16mMaxAllowedPacket') - .then((res) => { - clearTimeout(timeout); - conn.end(); - done(); - }) - .catch(done); + conn.query('DROP TABLE bigBatchWith16mMaxAllowedPacket'); + clearTimeout(timeout); + conn.end(); + resolve(); }); - }) - .catch(done); + }); }; - const bigBatchWith4mMaxAllowedPacket = (useCompression, useBulk, done) => { - base + const bigBatchWith4mMaxAllowedPacket = async (useCompression, useBulk) => { + const conn = await base .createConnection({ compress: useCompression, bulk: useBulk, logPackets: true - }) - .then((conn) => { - const timeout = setTimeout(() => { - console.log(conn.info.getLastPackets()); - }, 200000); - conn.query('DROP TABLE IF EXISTS bigBatchWith4mMaxAllowedPacket'); - conn.query( - 'CREATE TABLE bigBatchWith4mMaxAllowedPacket(id int, id2 int, id3 int, t varchar(128), id4 int) CHARSET utf8mb4' - ); - const values = []; - for (let i = 0; i < 1000000; i++) { - values.push([i, str]); - } - conn - .batch('INSERT INTO `bigBatchWith4mMaxAllowedPacket` values (1, 
?, 2, ?, 3)', values) - .then((res) => { - assert.equal(res.affectedRows, 1000000); - }) - .catch(done); - let currRow = 0; - conn + }); + const timeout = setTimeout(() => { + console.log(conn.info.getLastPackets()); + }, 200000); + conn.query('DROP TABLE IF EXISTS bigBatchWith4mMaxAllowedPacket'); + conn.query( + 'CREATE TABLE bigBatchWith4mMaxAllowedPacket(id int, id2 int, id3 int, t varchar(128), id4 int) CHARSET utf8mb4' + ); + await conn.query('FLUSH TABLES'); + const values = []; + for (let i = 0; i < 1000000; i++) { + values.push([i, str]); + } + let res = await conn.batch('INSERT INTO `bigBatchWith4mMaxAllowedPacket` values (1, ?, 2, ?, 3)', values); + assert.equal(res.affectedRows, 1000000); + let currRow = 0; + return new Promise(function (resolve, reject) { + conn .queryStream('select * from `bigBatchWith4mMaxAllowedPacket`') .on('error', (err) => { - done(new Error('must not have thrown any error !')); + reject(new Error('must not have thrown any error !')); }) .on('data', (row) => { assert.deepEqual(row, { @@ -625,215 +485,178 @@ describe('batch', () => { conn.query('DROP TABLE bigBatchWith4mMaxAllowedPacket'); clearTimeout(timeout); conn.end(); - done(); + resolve(); }); - }) - .catch(done); + }); }; - const bigBatchError = (useCompression, useBulk, done) => { - base + const bigBatchError = async (useCompression, useBulk) => { + const conn = await base .createConnection({ compress: useCompression, bulk: useBulk, logPackets: true - }) - .then((conn) => { - const timeout = setTimeout(() => { - console.log(conn.info.getLastPackets()); - }, 200000); - const values = []; - for (let i = 0; i < 1000000; i++) { - values.push([i, str]); - } - conn - .batch('INSERT INTO `bigBatchError` values (1, ?, 2, ?, 3)', values) - .then((res) => { - done(new Error('must have thrown error !')); - }) - .catch((err) => { - conn - .query('select 1') - .then((rows) => { - assert.deepEqual(rows, [{ 1: 1 }]); - clearTimeout(timeout); - conn.end(); - done(); - }) - .catch(done); - }); - }) - .catch(done); + }); + const timeout = setTimeout(() => { + console.log(conn.info.getLastPackets()); + }, 200000); + const values = []; + for (let i = 0; i < 1000000; i++) { + values.push([i, str]); + } + try { + await conn.batch('INSERT INTO `bigBatchError` values (1, ?, 2, ?, 3)', values); + throw new Error('must have thrown error !'); + } catch (err) { + const rows = await conn.query('select 1'); + assert.deepEqual(rows, [{ 1: 1 }]); + clearTimeout(timeout); + conn.end(); + } }; - const singleBigInsertWithoutMaxAllowedPacket = (useCompression, useBulk, done) => { - base - .createConnection({ compress: useCompression, bulk: useBulk }) - .then((conn) => { - const timeout = setTimeout(() => { - console.log(conn.info.getLastPackets()); - }, 25000); - conn.query('DROP TABLE IF EXISTS singleBigInsertWithoutMaxAllowedPacket'); - conn.query( - 'CREATE TABLE singleBigInsertWithoutMaxAllowedPacket(id int, id2 int, id3 int, t longtext, id4 int) CHARSET utf8mb4' - ); - conn + const singleBigInsertWithoutMaxAllowedPacket = async (useCompression, useBulk) => { + const conn = await base.createConnection({ compress: useCompression, bulk: useBulk }); + const timeout = setTimeout(() => { + console.log(conn.info.getLastPackets()); + }, 25000); + conn.query('DROP TABLE IF EXISTS singleBigInsertWithoutMaxAllowedPacket'); + conn.query( + 'CREATE TABLE singleBigInsertWithoutMaxAllowedPacket(id int, id2 int, id3 int, t longtext, id4 int) CHARSET utf8mb4' + ); + await conn.query('FLUSH TABLE'); + const res = await conn .batch('INSERT INTO 
`singleBigInsertWithoutMaxAllowedPacket` values (1, ?, 2, ?, 3)', [ [1, bigBuf], [2, 'john'] - ]) - .then((res) => { - assert.equal(res.affectedRows, 2); - conn - .query('select * from `singleBigInsertWithoutMaxAllowedPacket`') - .then((rows) => { - assert.deepEqual(rows, [ - { - id: 1, - id2: 1, - id3: 2, - t: bigBuf.toString(), - id4: 3 - }, - { - id: 1, - id2: 2, - id3: 2, - t: 'john', - id4: 3 - } - ]); - conn.query('DROP TABLE singleBigInsertWithoutMaxAllowedPacket'); - clearTimeout(timeout); - conn.end(); - done(); - }) - .catch(done); - }) - .catch(done); - }) - .catch(done); + ]); + assert.equal(res.affectedRows, 2); + const rows = await conn.query('select * from `singleBigInsertWithoutMaxAllowedPacket`'); + assert.deepEqual(rows, [ + { + id: 1, + id2: 1, + id3: 2, + t: bigBuf.toString(), + id4: 3 + }, + { + id: 1, + id2: 2, + id3: 2, + t: 'john', + id4: 3 + } + ]); + conn.query('DROP TABLE singleBigInsertWithoutMaxAllowedPacket'); + clearTimeout(timeout); + conn.end(); }; - const batchWithStream = (useCompression, useBulk, done) => { + const batchWithStream = async (useCompression, useBulk) => { const stream1 = fs.createReadStream(fileName); const stream2 = fs.createReadStream(fileName); - base + const conn = await base .createConnection({ compress: useCompression, bulk: useBulk, logPackets: true - }) - .then((conn) => { - const timeout = setTimeout(() => { - console.log(conn.info.getLastPackets()); - }, 25000); - conn.query('DROP TABLE IF EXISTS batchWithStream'); - conn.query( - 'CREATE TABLE batchWithStream(id int, id2 int, id3 int, t varchar(128), id4 int, id5 int) CHARSET utf8mb4' - ); - conn - .batch('INSERT INTO `batchWithStream` values (1, ?, 2, ?, ?, 3)', [ - [1, stream1, 99], - [2, stream2, 98] - ]) - .then((res) => { - assert.equal(res.affectedRows, 2); - conn.query('select * from `batchWithStream`').then((res) => { - assert.deepEqual(res, [ - { - id: 1, - id2: 1, - id3: 2, - t: str, - id4: 99, - id5: 3 - }, - { - id: 1, - id2: 2, - id3: 2, - t: str, - id4: 98, - id5: 3 - } - ]); - conn.query('DROP TABLE batchWithStream'); - clearTimeout(timeout); - conn.end(); - done(); - }); - }) - .catch(done); - }) - .catch(done); + }); + const timeout = setTimeout(() => { + console.log(conn.info.getLastPackets()); + }, 25000); + conn.query('DROP TABLE IF EXISTS batchWithStream'); + conn.query( + 'CREATE TABLE batchWithStream(id int, id2 int, id3 int, t varchar(128), id4 int, id5 int) CHARSET utf8mb4' + ); + await conn.query('FLUSH TABLES'); + let res = await conn.batch('INSERT INTO `batchWithStream` values (1, ?, 2, ?, ?, 3)', [ + [1, stream1, 99], + [2, stream2, 98] + ]); + assert.equal(res.affectedRows, 2); + res = await conn.query('select * from `batchWithStream`'); + assert.deepEqual(res, [ + { + id: 1, + id2: 1, + id3: 2, + t: str, + id4: 99, + id5: 3 + }, + { + id: 1, + id2: 2, + id3: 2, + t: str, + id4: 98, + id5: 3 + } + ]); + conn.query('DROP TABLE batchWithStream'); + clearTimeout(timeout); + conn.end(); }; - const batchErrorWithStream = (useCompression, useBulk, done) => { + const batchErrorWithStream = async (useCompression, useBulk) => { const stream1 = fs.createReadStream(fileName); const stream2 = fs.createReadStream(fileName); - base - .createConnection({ compress: useCompression, bulk: useBulk }) - .then((conn) => { - const timeout = setTimeout(() => { - console.log(conn.info.getLastPackets()); - }, 25000); - conn + const conn = await base.createConnection({compress: useCompression, bulk: useBulk}); + const timeout = setTimeout(() => { + 
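      // debug aid: dump the last packets exchanged if the batch is still pending after 25s (timer is cleared on success)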
console.log(conn.info.getLastPackets()); + }, 25000); + try { + await conn .batch('INSERT INTO batchErrorWithStream values (1, ?, 2, ?, ?, 3)', [ [1, stream1, 99], [2, stream2, 98] - ]) - .then(() => { - done(new Error('must have thrown error !')); - }) - .catch((err) => { - assert.isTrue(err != null); - assert.isTrue(err.message.includes(" doesn't exist")); - assert.isTrue( - err.message.includes( - 'sql: INSERT INTO batchErrorWithStream values (1, ?, 2, ?, ?, 3) - parameters:[[1,[object Object],99],[2,[object Object],98]]' - ) - ); - assert.equal(err.errno, 1146); - assert.equal(err.sqlState, '42S02'); - assert.equal(err.code, 'ER_NO_SUCH_TABLE'); - clearTimeout(timeout); - conn.end(); - done(); - }); - }) - .catch(done); + ]); + throw new Error('must have thrown error !'); + } catch (err) { + assert.isTrue(err != null); + assert.isTrue(err.message.includes(" doesn't exist")); + assert.isTrue( + err.message.includes( + 'sql: INSERT INTO batchErrorWithStream values (1, ?, 2, ?, ?, 3) - parameters:[[1,[object Object],99],[2,[object Object],98]]' + ) + ); + assert.equal(err.errno, 1146); + assert.equal(err.sqlState, '42S02'); + assert.equal(err.code, 'ER_NO_SUCH_TABLE'); + clearTimeout(timeout); + conn.end(); + } }; - const bigBatchWithStreams = (useCompression, useBulk, done) => { + const bigBatchWithStreams = async (useCompression, useBulk) => { const values = []; for (let i = 0; i < 1000000; i++) { if (i % 100000 === 0) values.push([i, fs.createReadStream(fileName), i * 2]); else values.push([i, str, i * 2]); } - base + const conn = await base .createConnection({ compress: useCompression, bulk: useBulk, logPackets: true - }) - .then((conn) => { - const timeout = setTimeout(() => { - console.log(conn.info.getLastPackets()); - }, 200000); - conn.query('DROP TABLE IF EXISTS bigBatchWithStreams'); - conn.query( - 'CREATE TABLE bigBatchWithStreams(id int, id2 int, id3 int, t varchar(128), id4 int, id5 int) CHARSET utf8mb4' - ); - conn - .batch('INSERT INTO `bigBatchWithStreams` values (1, ?, 2, ?, ?, 3)', values) - .then((res) => { - assert.equal(res.affectedRows, 1000000); - let currRow = 0; + }); + const timeout = setTimeout(() => { + console.log(conn.info.getLastPackets()); + }, 200000); + conn.query('DROP TABLE IF EXISTS bigBatchWithStreams'); + conn.query( + 'CREATE TABLE bigBatchWithStreams(id int, id2 int, id3 int, t varchar(128), id4 int, id5 int) CHARSET utf8mb4' + ); + await conn.query('FLUSH TABLES'); + let res = await conn.batch('INSERT INTO `bigBatchWithStreams` values (1, ?, 2, ?, ?, 3)', values); + assert.equal(res.affectedRows, 1000000); + let currRow = 0; + return new Promise(function (resolve, reject) { conn .queryStream('select * from `bigBatchWithStreams`') .on('error', (err) => { - done(new Error('must not have thrown any error !')); + reject(new Error('must not have thrown any error !')); }) .on('data', (row) => { assert.deepEqual(row, { @@ -851,212 +674,169 @@ describe('batch', () => { conn.query('DROP TABLE bigBatchWithStreams'); clearTimeout(timeout); conn.end(); - done(); + resolve(); }); - }) - .catch(done); - }) - .catch(done); + }); }; - const bigBatchErrorWithStreams = (useCompression, useBulk, done) => { + const bigBatchErrorWithStreams = async (useCompression, useBulk) => { const values = []; for (let i = 0; i < 1000000; i++) { if (i % 100000 === 0) values.push([i, fs.createReadStream(fileName), i * 2]); else values.push([i, str, i * 2]); } - base + const conn = await base .createConnection({ compress: useCompression, bulk: useBulk, logPackets: true - }) - 
.then((conn) => { - const timeout = setTimeout(() => { - console.log(conn.info.getLastPackets()); - }, 200000); - conn - .batch('INSERT INTO `blabla` values (1, ?, 2, ?, ?, 3)', values) - .then((res) => { - done(new Error('must have thrown error !')); - }) - .catch((err) => { - conn - .query('select 1') - .then((rows) => { - assert.deepEqual(rows, [{ 1: 1 }]); - conn.end(); - clearTimeout(timeout); - done(); - }) - .catch(done); - }); - }) - .catch(done); + }); + const timeout = setTimeout(() => { + console.log(conn.info.getLastPackets()); + }, 200000); + try { + await conn + .batch('INSERT INTO `blabla` values (1, ?, 2, ?, ?, 3)', values); + throw new Error('must have thrown error !'); + } catch (err) { + const rows = await conn.query('select 1'); + assert.deepEqual(rows, [{ 1: 1 }]); + conn.end(); + clearTimeout(timeout); + } }; - const simpleNamedPlaceHolders = (useBulk, done) => { - base - .createConnection({ namedPlaceholders: true, bulk: useBulk }) - .then((conn) => { - const timeout = setTimeout(() => { - console.log(conn.info.getLastPackets()); - }, 25000); - conn.query('DROP TABLE IF EXISTS simpleNamedPlaceHolders'); - conn.query( - 'CREATE TABLE simpleNamedPlaceHolders(id int, id2 int, id3 int, t varchar(128), id4 int) CHARSET utf8mb4' - ); - conn - .batch('INSERT INTO `simpleNamedPlaceHolders` values (1, :param_1, 2, :param_2, 3)', [ - { param_1: 1, param_2: 'john' }, - { param_1: 2, param_2: 'jack' } - ]) - .then((res) => { - assert.equal(res.affectedRows, 2); - conn - .query('select * from `simpleNamedPlaceHolders`') - .then((res) => { - assert.deepEqual(res, [ - { - id: 1, - id2: 1, - id3: 2, - t: 'john', - id4: 3 - }, - { - id: 1, - id2: 2, - id3: 2, - t: 'jack', - id4: 3 - } - ]); - conn.query('DROP TABLE simpleNamedPlaceHolders'); - conn.end(); - clearTimeout(timeout); - done(); - }) - .catch(done); - }) - .catch(done); - }) - .catch(done); + const simpleNamedPlaceHolders = async (useBulk) => { + const conn = await base.createConnection({ namedPlaceholders: true, bulk: useBulk }); + const timeout = setTimeout(() => { + console.log(conn.info.getLastPackets()); + }, 25000); + conn.query('DROP TABLE IF EXISTS simpleNamedPlaceHolders'); + conn.query( + 'CREATE TABLE simpleNamedPlaceHolders(id int, id2 int, id3 int, t varchar(128), id4 int) CHARSET utf8mb4' + ); + await conn.query('FLUSH TABLES'); + let res = await conn + .batch('INSERT INTO `simpleNamedPlaceHolders` values (1, :param_1, 2, :param_2, 3)', [ + { param_1: 1, param_2: 'john' }, + { param_1: 2, param_2: 'jack' } + ]); + assert.equal(res.affectedRows, 2); + res = await conn.query('select * from `simpleNamedPlaceHolders`'); + assert.deepEqual(res, [ + { + id: 1, + id2: 1, + id3: 2, + t: 'john', + id4: 3 + }, + { + id: 1, + id2: 2, + id3: 2, + t: 'jack', + id4: 3 + } + ]); + conn.query('DROP TABLE simpleNamedPlaceHolders'); + conn.end(); + clearTimeout(timeout); }; - const simpleNamedPlaceHoldersErr = (useBulk, done) => { - base - .createConnection({ namedPlaceholders: true, bulk: useBulk }) - .then((conn) => { - const timeout = setTimeout(() => { - console.log(conn.info.getLastPackets()); - }, 25000); - conn + const simpleNamedPlaceHoldersErr = async (useBulk) => { + const conn = await base.createConnection({ namedPlaceholders: true, bulk: useBulk }); + const timeout = setTimeout(() => { + console.log(conn.info.getLastPackets()); + }, 25000); + try { + await conn .batch('INSERT INTO blabla values (1, :param_1, 2, :param_2, 3)', [ { param_1: 1, param_2: 'john' }, { param_1: 2, param_2: 'jack' } - ]) - .then(() => { - 
done(new Error('must have thrown error !')); - }) - .catch((err) => { - assert.isTrue(err != null); - assert.isTrue(err.message.includes(" doesn't exist")); - assert.isTrue( - err.message.includes( - "sql: INSERT INTO blabla values (1, :param_1, 2, :param_2, 3) - parameters:[{'param_1':1,'param_2':'john'},{'param_1':2,'param_2':'jack'}]" - ) - ); - assert.equal(err.errno, 1146); - assert.equal(err.sqlState, '42S02'); - assert.equal(err.code, 'ER_NO_SUCH_TABLE'); - clearTimeout(timeout); - conn.end(); - done(); - }); - }) - .catch(done); + ]); + throw new Error('must have thrown error !'); + } catch (err) { + assert.isTrue(err != null); + assert.isTrue(err.message.includes(" doesn't exist")); + assert.isTrue( + err.message.includes( + "sql: INSERT INTO blabla values (1, :param_1, 2, :param_2, 3) - parameters:[{'param_1':1,'param_2':'john'},{'param_1':2,'param_2':'jack'}]" + ) + ); + assert.equal(err.errno, 1146); + assert.equal(err.sqlState, '42S02'); + assert.equal(err.code, 'ER_NO_SUCH_TABLE'); + clearTimeout(timeout); + conn.end(); + } }; - const nonRewritableHoldersErr = (useBulk, done) => { - base - .createConnection({ namedPlaceholders: true, bulk: useBulk }) - .then((conn) => { - const timeout = setTimeout(() => { - console.log(conn.info.getLastPackets()); - }, 25000); - conn + const nonRewritableHoldersErr = async (useBulk) => { + const conn = await base.createConnection({ namedPlaceholders: true, bulk: useBulk }); + const timeout = setTimeout(() => { + console.log(conn.info.getLastPackets()); + }, 25000); + try { + const res = await conn .batch('SELECT :id2 as id, :id1 as t', [ { id2: 1, id1: 'john' }, { id1: 'jack', id2: 2 } - ]) - .then((res) => { - conn.end(); - if (useBulk & conn.info.isMariaDB() && conn.info.hasMinVersion(10, 2, 7)) { - done(new Error('Must have thrown an exception')); - } else { - assert.deepEqual(res, [ - [ - { - id: 1, - t: 'john' - } - ], - [ - { - id: 2, - t: 'jack' - } - ] - ]); - clearTimeout(timeout); - done(); - } - }) - .catch((err) => { - conn.end(); - if (useBulk & conn.info.isMariaDB() && conn.info.hasMinVersion(10, 2, 7)) { - assert.isTrue( - err.message.includes( - 'This command is not supported in the prepared statement protocol yet' - ) - ); - clearTimeout(timeout); - done(); - } else { - done(err); - } - }); - }) - .catch(done); - }; - - const more16MNamedPlaceHolders = function (useBulk, done) { - base - .createConnection({ namedPlaceholders: true, bulk: useBulk }) - .then((conn) => { - const timeout = setTimeout(() => { - console.log(conn.info.getLastPackets()); - }, 200000); - conn.query('DROP TABLE IF EXISTS more16MNamedPlaceHolders'); - conn.query( - 'CREATE TABLE more16MNamedPlaceHolders(id int, id2 int, id3 int, t varchar(128), id4 int) CHARSET utf8mb4' + ]); + if (useBulk & conn.info.isMariaDB() && conn.info.hasMinVersion(10, 2, 7)) { + conn.end(); + throw new Error('Must have thrown an exception'); + } + assert.deepEqual(res, [ + [ + { + id: 1, + t: 'john' + } + ], + [ + { + id: 2, + t: 'jack' + } + ] + ]); + } catch (err) { + if (useBulk & conn.info.isMariaDB() && conn.info.hasMinVersion(10, 2, 7)) { + assert.isTrue( + err.message.includes( + 'This command is not supported in the prepared statement protocol yet' + ) ); - const values = []; - for (let i = 0; i < 1000000; i++) { - values.push({ id1: i, id2: str }); - } - conn - .batch('INSERT INTO `more16MNamedPlaceHolders` values (1, :id1, 2, :id2, 3)', values) - .then((res) => { - assert.equal(res.affectedRows, 1000000); + } + } + conn.end(); + clearTimeout(timeout); + }; - let 
currRow = 0; + const more16MNamedPlaceHolders = async function (useBulk) { + const conn = await base.createConnection({ namedPlaceholders: true, bulk: useBulk }); + const timeout = setTimeout(() => { + console.log(conn.info.getLastPackets()); + }, 200000); + conn.query('DROP TABLE IF EXISTS more16MNamedPlaceHolders'); + conn.query( + 'CREATE TABLE more16MNamedPlaceHolders(id int, id2 int, id3 int, t varchar(128), id4 int) CHARSET utf8mb4' + ); + await conn.query('FLUSH TABLES'); + const values = []; + for (let i = 0; i < 1000000; i++) { + values.push({ id1: i, id2: str }); + } + const res = await conn.batch('INSERT INTO `more16MNamedPlaceHolders` values (1, :id1, 2, :id2, 3)', values); + assert.equal(res.affectedRows, 1000000); + let currRow = 0; + return new Promise(function (resolve, reject) { conn .queryStream('select * from `more16MNamedPlaceHolders`') .on('error', (err) => { - done(new Error('must not have thrown any error !')); + reject(new Error('must not have thrown any error !')); }) .on('data', (row) => { assert.deepEqual(row, { @@ -1073,150 +853,122 @@ describe('batch', () => { conn.query('DROP TABLE more16MNamedPlaceHolders'); clearTimeout(timeout); conn.end(); - done(); + resolve(); }); - }) - .catch(done); - }) - .catch(done); + }); }; - const more16MSingleNamedPlaceHolders = function (useBulk, done) { - base - .createConnection({ namedPlaceholders: true, bulk: useBulk }) - .then((conn) => { - const timeout = setTimeout(() => { - console.log(conn.info.getLastPackets()); - }, 200000); - conn.query('DROP TABLE IF EXISTS more16MSingleNamedPlaceHolders'); - conn.query( - 'CREATE TABLE more16MSingleNamedPlaceHolders(id int, id2 int, id3 int, t longtext, id4 int) CHARSET utf8mb4' - ); - conn + const more16MSingleNamedPlaceHolders = async function (useBulk) { + const conn = await base + .createConnection({ namedPlaceholders: true, bulk: useBulk }); + const timeout = setTimeout(() => { + console.log(conn.info.getLastPackets()); + }, 200000); + conn.query('DROP TABLE IF EXISTS more16MSingleNamedPlaceHolders'); + conn.query( + 'CREATE TABLE more16MSingleNamedPlaceHolders(id int, id2 int, id3 int, t longtext, id4 int) CHARSET utf8mb4' + ); + await conn.query('FLUSH TABLES'); + const res = await conn .batch('INSERT INTO `more16MSingleNamedPlaceHolders` values (1, :id, 2, :id2, 3)', [ { id: 1, id2: bigBuf }, { id: 2, id2: 'john' } - ]) - .then((res) => { - assert.equal(res.affectedRows, 2); - conn - .query('select * from `more16MSingleNamedPlaceHolders`') - .then((rows) => { - assert.deepEqual(rows, [ - { - id: 1, - id2: 1, - id3: 2, - t: bigBuf.toString(), - id4: 3 - }, - { - id: 1, - id2: 2, - id3: 2, - t: 'john', - id4: 3 - } - ]); - conn.query('DROP TABLE more16MSingleNamedPlaceHolders'); - clearTimeout(timeout); - conn.end(); - done(); - }) - .catch(done); - }) - .catch(done); - }) - .catch(done); + ]); + assert.equal(res.affectedRows, 2); + const rows = await conn.query('select * from `more16MSingleNamedPlaceHolders`'); + assert.deepEqual(rows, [ + { + id: 1, + id2: 1, + id3: 2, + t: bigBuf.toString(), + id4: 3 + }, + { + id: 1, + id2: 2, + id3: 2, + t: 'john', + id4: 3 + } + ]); + conn.query('DROP TABLE more16MSingleNamedPlaceHolders'); + clearTimeout(timeout); + conn.end(); }; - const streamNamedPlaceHolders = (useBulk, done) => { + const streamNamedPlaceHolders = async (useBulk) => { const stream1 = fs.createReadStream(fileName); const stream2 = fs.createReadStream(fileName); - base - .createConnection({ namedPlaceholders: true, bulk: useBulk }) - .then((conn) => { - const 
timeout = setTimeout(() => { - console.log(conn.info.getLastPackets()); - }, 25000); - conn.query('DROP TABLE IF EXISTS streamNamedPlaceHolders'); - conn.query( - 'CREATE TABLE streamNamedPlaceHolders(id int, id2 int, id3 int, t varchar(128), id4 int, id5 int) CHARSET utf8mb4' - ); - conn - .batch('INSERT INTO `streamNamedPlaceHolders` values (1, :id1, 2, :id3, :id7, 3)', [ + const conn = await base.createConnection({ namedPlaceholders: true, bulk: useBulk }); + const timeout = setTimeout(() => { + console.log(conn.info.getLastPackets()); + }, 25000); + conn.query('DROP TABLE IF EXISTS streamNamedPlaceHolders'); + conn.query( + 'CREATE TABLE streamNamedPlaceHolders(id int, id2 int, id3 int, t varchar(128), id4 int, id5 int) CHARSET utf8mb4' + ); + await conn.query('FLUSH TABLE'); + const res = await conn.batch('INSERT INTO `streamNamedPlaceHolders` values (1, :id1, 2, :id3, :id7, 3)', [ { id1: 1, id3: stream1, id4: 99, id5: 6 }, { id1: 2, id3: stream2, id4: 98 } - ]) - .then((res) => { - assert.equal(res.affectedRows, 2); - conn.query('select * from `streamNamedPlaceHolders`').then((res) => { - assert.deepEqual(res, [ - { - id: 1, - id2: 1, - id3: 2, - t: str, - id4: null, - id5: 3 - }, - { - id: 1, - id2: 2, - id3: 2, - t: str, - id4: null, - id5: 3 - } - ]); - conn.query('DROP TABLE streamNamedPlaceHolders'); - clearTimeout(timeout); - conn.end(); - done(); - }); - }) - .catch(done); - }) - .catch(done); + ]); + assert.equal(res.affectedRows, 2); + const rows = await conn.query('select * from `streamNamedPlaceHolders`'); + assert.deepEqual(rows, [ + { + id: 1, + id2: 1, + id3: 2, + t: str, + id4: null, + id5: 3 + }, + { + id: 1, + id2: 2, + id3: 2, + t: str, + id4: null, + id5: 3 + } + ]); + conn.query('DROP TABLE streamNamedPlaceHolders'); + clearTimeout(timeout); + conn.end(); }; - const streamErrorNamedPlaceHolders = (useBulk, done) => { + const streamErrorNamedPlaceHolders = async (useBulk) => { const stream1 = fs.createReadStream(fileName); const stream2 = fs.createReadStream(fileName); - base - .createConnection({ namedPlaceholders: true, bulk: useBulk }) - .then((conn) => { - const timeout = setTimeout(() => { - console.log(conn.info.getLastPackets()); - }, 25000); - conn - .batch('INSERT INTO blabla values (1, :id1, 2, :id3, :id7, 3)', [ + const conn = await base.createConnection({ namedPlaceholders: true, bulk: useBulk }); + const timeout = setTimeout(() => { + console.log(conn.info.getLastPackets()); + }, 25000); + + try { + await conn.batch('INSERT INTO blabla values (1, :id1, 2, :id3, :id7, 3)', [ { id1: 1, id3: stream1, id4: 99, id5: 6 }, { id1: 2, id3: stream2, id4: 98 } - ]) - .then(() => { - done(new Error('must have thrown error !')); - }) - .catch((err) => { - assert.isTrue(err != null); - assert.isTrue(err.message.includes(" doesn't exist")); - assert.isTrue( - err.message.includes( - "sql: INSERT INTO blabla values (1, :id1, 2, :id3, :id7, 3) - parameters:[{'id1':1,'id3':[object Object],'id4':99,'id5':6},{'id1':2,'id3':[object Object],'id4':98}]" - ) - ); - assert.equal(err.errno, 1146); - assert.equal(err.sqlState, '42S02'); - assert.equal(err.code, 'ER_NO_SUCH_TABLE'); - clearTimeout(timeout); - conn.end(); - done(); - }); - }) - .catch(done); + ]); + throw new Error('must have thrown error !'); + } catch (err) { + assert.isTrue(err != null); + assert.isTrue(err.message.includes(" doesn't exist")); + assert.isTrue( + err.message.includes( + "sql: INSERT INTO blabla values (1, :id1, 2, :id3, :id7, 3) - parameters:[{'id1':1,'id3':[object 
Object],'id4':99,'id5':6},{'id1':2,'id3':[object Object],'id4':98}]" + ) + ); + assert.equal(err.errno, 1146); + assert.equal(err.sqlState, '42S02'); + assert.equal(err.code, 'ER_NO_SUCH_TABLE'); + clearTimeout(timeout); + conn.end(); + } }; - const stream16MNamedPlaceHolders = function (useBulk, done) { + const stream16MNamedPlaceHolders = async function (useBulk) { const values = []; for (let i = 0; i < 1000000; i++) { if (i % 100000 === 0) values.push({ id1: i, id2: fs.createReadStream(fileName), id3: i * 2 }); @@ -1228,28 +980,26 @@ describe('batch', () => { }); } - base - .createConnection({ namedPlaceholders: true, bulk: useBulk }) - .then((conn) => { - const timeout = setTimeout(() => { - console.log(conn.info.getLastPackets()); - }, 200000); - conn.query('DROP TABLE IF EXISTS stream16MNamedPlaceHolders'); - conn.query( - 'CREATE TABLE stream16MNamedPlaceHolders(id int, id2 int, id3 int, t varchar(128), id4 int, id5 int) CHARSET utf8mb4' - ); - conn - .batch( - 'INSERT INTO `stream16MNamedPlaceHolders` values (1, :id1, 2, :id2, :id3, 3)', - values - ) - .then((res) => { - assert.equal(res.affectedRows, 1000000); - let currRow = 0; + const conn = await base.createConnection({ namedPlaceholders: true, bulk: useBulk }); + const timeout = setTimeout(() => { + console.log(conn.info.getLastPackets()); + }, 200000); + conn.query('DROP TABLE IF EXISTS stream16MNamedPlaceHolders'); + conn.query( + 'CREATE TABLE stream16MNamedPlaceHolders(id int, id2 int, id3 int, t varchar(128), id4 int, id5 int) CHARSET utf8mb4' + ); + await conn.query('FLUSH TABLES'); + const res = await conn.batch( + 'INSERT INTO `stream16MNamedPlaceHolders` values (1, :id1, 2, :id2, :id3, 3)', + values + ); + assert.equal(res.affectedRows, 1000000); + let currRow = 0; + return new Promise(function (resolve, reject) { conn .queryStream('select * from `stream16MNamedPlaceHolders`') .on('error', (err) => { - done(new Error('must not have thrown any error !')); + reject(new Error('must not have thrown any error !')); }) .on('data', (row) => { assert.deepEqual(row, { @@ -1267,12 +1017,9 @@ describe('batch', () => { conn.query('DROP TABLE stream16MNamedPlaceHolders'); clearTimeout(timeout); conn.end(); - done(); + resolve(); }); - }) - .catch(done); - }) - .catch(done); + }); }; describe('standard question mark using bulk', () => { @@ -1280,798 +1027,780 @@ describe('batch', () => { if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); await shareConn.query('DROP TABLE IF EXISTS bufLength'); await shareConn.query('create table bufLength (val TEXT not null, val2 varchar(10))'); + await shareConn.query('FLUSH TABLES'); await shareConn.batch('update bufLength set val=?, val2=?', [ [Buffer.alloc(16366).toString(), 'abc'] ]); }); const useCompression = false; - it('simple batch, local date', function (done) { - if (process.env.SKYSQL || !base.utf8Collation()) { + it('simple batch, local date', async function () { + if (process.env.SKYSQL || process.env.SKYSQL_HA || !base.utf8Collation()) { this.skip(); return; } this.timeout(30000); if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); - simpleBatch(useCompression, true, 'local', done); + await simpleBatch(useCompression, true, 'local'); }); - it('simple batch with option', function (done) { - if (process.env.SKYSQL) { + it('simple batch with option', async function () { + if (process.env.SKYSQL || process.env.SKYSQL_HA) { this.skip(); return; } this.timeout(30000); if (!shareConn.info.isMariaDB() && 
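        // i.e. run only against MariaDB or MySQL >= 5.6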
!shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); - simpleBatchWithOptions(useCompression, true, done); + await simpleBatchWithOptions(useCompression, true); }); - it('batch without value', function (done) { - if (process.env.SKYSQL) { + it('batch without value', async function () { + if (process.env.SKYSQL || process.env.SKYSQL_HA) { this.skip(); return; } this.timeout(30000); if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); - noValueBatch(useCompression, true, done); + await noValueBatch(useCompression, true); }); - it('batch without parameter', function (done) { + it('batch without parameter', async function () { if ( process.env.SKYSQL || + process.env.SKYSQL_HA || (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) ) { this.skip(); return; } - base.createConnection({ compress: useCompression, bulk: true }).then((conn) => { - conn - .batch('INSERT INTO `blabla` values (?)') - .then((res) => { - conn.end(); - done(new Error('expect an error !')); - }) - .catch((err) => { - assert.isTrue(err.message.includes('Batch must have values set'), err.message); - conn.end(); - done(); - }); - }); + const conn = await base.createConnection({ compress: useCompression, bulk: true }); + try { + await conn.batch('INSERT INTO `blabla` values (?)'); + conn.end(); + throw new Error('expect an error !'); + } catch (err) { + assert.isTrue(err.message.includes('Batch must have values set'), err.message); + conn.end(); + } }); - it('batch with erroneous parameter', function (done) { + it('batch with erroneous parameter', async function () { if ( process.env.SKYSQL || + process.env.SKYSQL_HA || (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) ) { this.skip(); return; } - base.createConnection({ compress: useCompression, bulk: true }).then((conn) => { - conn - .batch('INSERT INTO `blabla` values (?, ?)', [ - [1, 2], - [1, undefined] - ]) - .then((res) => { - conn.end(); - done(new Error('expect an error !')); - }) - .catch((err) => { + const conn = await base.createConnection({ compress: useCompression, bulk: true }); + try { + await conn.batch('INSERT INTO `blabla` values (?, ?)', [ + [1, 2], + [1, undefined] + ]); + conn.end(); + throw new Error('expect an error !'); + } catch (err) { assert.isTrue( err.message.includes('Parameter at position 2 is undefined for values 1', err.message) ); conn.end(); - done(); - }); - }); + }; }); - it('simple batch offset date', function (done) { - if (process.env.SKYSQL || !base.utf8Collation()) { + it('simple batch offset date', async function () { + if (process.env.SKYSQL || process.env.SKYSQL_HA || !base.utf8Collation()) { this.skip(); return; } this.timeout(30000); if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); - simpleBatch(useCompression, true, timezoneParam, done); + await simpleBatch(useCompression, true, timezoneParam); }); - it('simple batch offset date Z ', function (done) { - if (process.env.SKYSQL || !base.utf8Collation()) { + it('simple batch offset date Z ', async function () { + if (process.env.SKYSQL || process.env.SKYSQL_HA || !base.utf8Collation()) { this.skip(); return; } this.timeout(30000); if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); - simpleBatch(useCompression, true, 'Z', done); + await simpleBatch(useCompression, true, 'Z'); }); - it('simple batch encoding CP1251', function (done) { - if (process.env.SKYSQL) { + it('simple batch encoding CP1251', async function () { + if 
(process.env.SKYSQL || process.env.SKYSQL_HA) { this.skip(); return; } this.timeout(30000); - simpleBatchEncodingCP1251(useCompression, true, 'local', done); + await simpleBatchEncodingCP1251(useCompression, true, 'local'); }); - it('simple batch error message ', function (done) { - if (process.env.SKYSQL) { + it('simple batch error message ', async function () { + if (process.env.SKYSQL || process.env.SKYSQL_HA) { this.skip(); return; } this.timeout(30000); - simpleBatchErrorMsg(useCompression, true, done); + await simpleBatchErrorMsg(useCompression, true); }); - it('simple batch error message packet split', function (done) { - if (process.env.SKYSQL) { + it('simple batch error message packet split', async function () { + if (process.env.SKYSQL || process.env.SKYSQL_HA) { this.skip(); return; } this.timeout(30000); if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); - simpleBatchErrorSplit(useCompression, true, 'local', done); + await simpleBatchErrorSplit(useCompression, true, 'local'); }); - it('non rewritable batch', function (done) { - if (process.env.SKYSQL || !supportBulk) { + it('non rewritable batch', async function () { + if (process.env.SKYSQL || process.env.SKYSQL_HA || !supportBulk) { this.skip(); return; } this.timeout(30000); - nonRewritableBatch(useCompression, true, done); + await nonRewritableBatch(useCompression, true); }); - it('16M+ batch with 16M max_allowed_packet', function (done) { - if (process.env.SKYSQL || !process.env.RUN_LONG_TEST || maxAllowedSize <= testSize) { + it('16M+ batch with 16M max_allowed_packet', async function () { + if ( + process.env.SKYSQL || + process.env.SKYSQL_HA || + !process.env.RUN_LONG_TEST || + maxAllowedSize <= testSize + ) { this.skip(); return; } this.timeout(360000); - bigBatchWith16mMaxAllowedPacket(useCompression, true, done); + await bigBatchWith16mMaxAllowedPacket(useCompression, true); }); - it('16M+ batch with max_allowed_packet set to 4M', function (done) { - if (process.env.SKYSQL || !process.env.RUN_LONG_TEST || maxAllowedSize <= 4 * 1024 * 1024) { + it('16M+ batch with max_allowed_packet set to 4M', async function () { + if ( + process.env.SKYSQL || + process.env.SKYSQL_HA || + !process.env.RUN_LONG_TEST || + maxAllowedSize <= 4 * 1024 * 1024 + ) { this.skip(); return; } this.timeout(360000); - bigBatchWith4mMaxAllowedPacket(useCompression, true, done); + await bigBatchWith4mMaxAllowedPacket(useCompression, true); }); - it('16M+ error batch', function (done) { - if (process.env.SKYSQL || maxAllowedSize <= testSize) { + it('16M+ error batch', async function () { + if (process.env.SKYSQL || process.env.SKYSQL_HA || maxAllowedSize <= testSize) { this.skip(); } else { this.timeout(360000); - bigBatchError(useCompression, true, done); + await bigBatchError(useCompression, true); } }); - it('16M+ single insert batch with no maxAllowedPacket set', function (done) { - if (process.env.SKYSQL || !process.env.RUN_LONG_TEST || maxAllowedSize <= testSize) { + it('16M+ single insert batch with no maxAllowedPacket set', async function () { + if ( + process.env.SKYSQL || + process.env.SKYSQL_HA || + !process.env.RUN_LONG_TEST || + maxAllowedSize <= testSize + ) { this.skip(); } else { this.timeout(360000); - singleBigInsertWithoutMaxAllowedPacket(useCompression, true, done); + await singleBigInsertWithoutMaxAllowedPacket(useCompression, true); } }); - it('batch with streams', function (done) { - if (process.env.SKYSQL || !base.utf8Collation()) { + it('batch with streams', async function () { + if 
(process.env.SKYSQL || process.env.SKYSQL_HA || !base.utf8Collation()) { this.skip(); } else { this.timeout(30000); - batchWithStream(useCompression, true, done); + await batchWithStream(useCompression, true); } }); - it('batch error with streams', function (done) { - if (process.env.SKYSQL) { + it('batch error with streams', async function () { + if (process.env.SKYSQL || process.env.SKYSQL_HA) { this.skip(); } else { this.timeout(30000); - batchErrorWithStream(useCompression, true, done); + await batchErrorWithStream(useCompression, true); } }); - it('16M+ batch with streams', function (done) { - if (process.env.SKYSQL || !process.env.RUN_LONG_TEST || maxAllowedSize <= testSize) { + it('16M+ batch with streams', async function () { + if ( + process.env.SKYSQL || + process.env.SKYSQL_HA || + !process.env.RUN_LONG_TEST || + maxAllowedSize <= testSize + ) { this.skip(); } else { this.timeout(360000); - bigBatchWithStreams(useCompression, true, done); + await bigBatchWithStreams(useCompression, true); } }); - it('16M+ error batch with streams', function (done) { - if (process.env.SKYSQL || !process.env.RUN_LONG_TEST || maxAllowedSize <= testSize) { + it('16M+ error batch with streams', async function () { + if ( + process.env.SKYSQL || + process.env.SKYSQL_HA || + !process.env.RUN_LONG_TEST || + maxAllowedSize <= testSize + ) { this.skip(); return; } this.timeout(360000); - bigBatchErrorWithStreams(useCompression, true, done); + await bigBatchErrorWithStreams(useCompression, true); }); }); describe('standard question mark and compress with bulk', () => { const useCompression = true; - it('simple batch, local date', function (done) { - if (process.env.SKYSQL) this.skip(); + it('simple batch, local date', async function () { + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!base.utf8Collation()) this.skip(); this.timeout(30000); if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); - simpleBatch(useCompression, true, 'local', done); + await simpleBatch(useCompression, true, 'local'); }); - it('simple batch offset date', function (done) { - if (process.env.SKYSQL) this.skip(); + it('simple batch offset date', async function () { + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!base.utf8Collation()) this.skip(); this.timeout(30000); if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); - simpleBatch(useCompression, true, timezoneParam, done); + await simpleBatch(useCompression, true, timezoneParam); }); - it('simple batch error message ', function (done) { - if (process.env.SKYSQL) this.skip(); + it('simple batch error message ', async function () { + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); this.timeout(30000); - simpleBatchErrorMsg(useCompression, true, done); + await simpleBatchErrorMsg(useCompression, true); }); - it('batch without value', function (done) { - if (process.env.SKYSQL) this.skip(); + it('batch without value', async function () { + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); this.timeout(30000); if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); - noValueBatch(useCompression, true, done); + await noValueBatch(useCompression, true); }); - it('non rewritable batch', function (done) { - if (process.env.SKYSQL || !supportBulk) this.skip(); + it('non rewritable batch', async function () { + if (process.env.SKYSQL || process.env.SKYSQL_HA || !supportBulk) this.skip(); this.timeout(30000); - 
nonRewritableBatch(useCompression, true, done); + await nonRewritableBatch(useCompression, true); }); - it('16M+ batch with 16M max_allowed_packet', function (done) { - if (process.env.SKYSQL) this.skip(); + it('16M+ batch with 16M max_allowed_packet', async function () { + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!process.env.RUN_LONG_TEST) this.skip(); if (maxAllowedSize <= testSize) this.skip(); this.timeout(360000); - bigBatchWith16mMaxAllowedPacket(useCompression, true, done); + await bigBatchWith16mMaxAllowedPacket(useCompression, true); }); - it('16M+ batch with max_allowed_packet set to 4M', function (done) { - if (process.env.SKYSQL) this.skip(); + it('16M+ batch with max_allowed_packet set to 4M', async function () { + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!process.env.RUN_LONG_TEST) this.skip(); if (maxAllowedSize <= 4 * 1024 * 1024) this.skip(); this.timeout(360000); - bigBatchWith4mMaxAllowedPacket(useCompression, true, done); + await bigBatchWith4mMaxAllowedPacket(useCompression, true); }); - it('16M+ error batch', function (done) { - if (process.env.SKYSQL) this.skip(); + it('16M+ error batch', async function () { + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (maxAllowedSize <= testSize) this.skip(); this.timeout(360000); - bigBatchError(useCompression, true, done); + await bigBatchError(useCompression, true); }); - it('16M+ single insert batch with no maxAllowedPacket set', function (done) { - if (process.env.SKYSQL) this.skip(); + it('16M+ single insert batch with no maxAllowedPacket set', async function () { + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!process.env.RUN_LONG_TEST) this.skip(); if (maxAllowedSize <= testSize) this.skip(); this.timeout(360000); - singleBigInsertWithoutMaxAllowedPacket(useCompression, true, done); + await singleBigInsertWithoutMaxAllowedPacket(useCompression, true); }); - it('batch with streams', function (done) { - if (process.env.SKYSQL) this.skip(); + it('batch with streams', async function () { + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!base.utf8Collation()) this.skip(); this.timeout(30000); - batchWithStream(useCompression, true, done); + await batchWithStream(useCompression, true); }); - it('batch error with streams', function (done) { - if (process.env.SKYSQL) this.skip(); + it('batch error with streams', async function () { + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); this.timeout(30000); - batchErrorWithStream(useCompression, true, done); + await batchErrorWithStream(useCompression, true); }); - it('16M+ batch with streams', function (done) { - if (process.env.SKYSQL) this.skip(); + it('16M+ batch with streams', async function () { + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!process.env.RUN_LONG_TEST) this.skip(); if (maxAllowedSize <= testSize) this.skip(); this.timeout(360000); - bigBatchWithStreams(useCompression, true, done); + await bigBatchWithStreams(useCompression, true); }); - it('16M+ error batch with streams', function (done) { - if (process.env.SKYSQL) this.skip(); + it('16M+ error batch with streams', async function () { + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!process.env.RUN_LONG_TEST) this.skip(); if (maxAllowedSize <= testSize) this.skip(); this.timeout(360000); - bigBatchErrorWithStreams(useCompression, true, done); + await bigBatchErrorWithStreams(useCompression, true); }); }); describe('standard question mark using 
rewrite', () => {
     const useCompression = false;
-    it('simple batch, local date', function (done) {
+    it('simple batch, local date', async function () {
       if (!base.utf8Collation()) this.skip();
       this.timeout(30000);
       if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip();
-      simpleBatch(useCompression, false, 'local', done);
+      await simpleBatch(useCompression, false, 'local');
     });

-    it('batch without parameter', function (done) {
+    it('batch without parameter', async function () {
       if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip();
-      base.createConnection({ compress: useCompression, bulk: false }).then((conn) => {
-        conn
-          .batch('INSERT INTO `blabla` values (?)')
-          .then((res) => {
-            conn.end();
-            done(new Error('expect an error !'));
-          })
-          .catch((err) => {
-            assert.isTrue(err.message.includes('Batch must have values set'), err.message);
-            conn.end();
-            done();
-          });
-      });
+      const conn = await base.createConnection({ compress: useCompression, bulk: false });
+      try {
+        await conn.batch('INSERT INTO `blabla` values (?)');
+        throw new Error('expect an error !');
+      } catch (err) {
+        assert.isTrue(err.message.includes('Batch must have values set'), err.message);
+        conn.end();
+      }
+    });
+
+    it('rewrite split for maxAllowedPacket', async function () {
+      const t = makeid(100);
+      const conn = await base.createConnection({ bulk: false, maxAllowedPacket: 150 });
+      conn.query('DROP TABLE IF EXISTS my_table');
+      conn.query('CREATE TABLE my_table(id int, val LONGTEXT)');
+      await conn.query('FLUSH TABLES');
+      await conn.batch('INSERT INTO my_table(id,val) VALUES( ?, ?) ', [
+        [1, t],
+        [2, t]
+      ]);
+      const res = await conn.query('SELECT * FROM my_table');
+      assert.deepEqual(res, [
+        { id: 1, val: t },
+        { id: 2, val: t }
+      ]);
+      conn.end();
     });

-    it('rewrite split for maxAllowedPacket', function (done) {
-      const t = makeid(100);
-      base
-        .createConnection({ bulk: false, maxAllowedPacket: 150 })
-        .then((conn) => {
-          conn
-            .query('DROP TABLE IF EXISTS my_table')
-            .then(() => {
-              return conn.query('CREATE TABLE my_table(id int, val LONGTEXT)');
-            })
-            .then(() => {
-              return conn.batch('INSERT INTO my_table(id,val) VALUES( ?, ?) 
', [ - [1, t], - [2, t] - ]); - }) - .then((res) => { - return conn.query('SELECT * FROM my_table'); - }) - .then((res) => { - assert.deepEqual(res, [ - { id: 1, val: t }, - { id: 2, val: t } - ]); - conn.end(); - done(); - }) - .catch(done); - }) - .catch(done); - }); - - it('batch with erroneous parameter', function (done) { + it('batch with erroneous parameter', async function () { if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); - base.createConnection({ compress: useCompression, bulk: false }).then((conn) => { - conn - .batch('INSERT INTO `blabla` values (?,?)', [[1, 2], [1]]) - .then((res) => { - conn.end(); - done(new Error('expect an error !')); - }) - .catch((err) => { - assert.isTrue( - err.message.includes('Parameter at position 1 is not set for values 1'), - err.message - ); - conn.end(); - done(); - }); - }); + const conn = await base.createConnection({ compress: useCompression, bulk: false }); + try { + await conn.batch('INSERT INTO `blabla` values (?,?)', [[1, 2], [1]]); + conn.end(); + throw new Error('expect an error !'); + } catch (err) { + assert.isTrue( + err.message.includes('Parameter at position 1 is not set for values 1'), + err.message + ); + conn.end(); + } }); - it('batch without value', function (done) { + it('batch without value', async function () { this.timeout(30000); - noValueBatch(useCompression, false, done); + await noValueBatch(useCompression, false); }); - it('batch with undefined parameter', function (done) { + it('batch with undefined parameter', async function () { if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); - base.createConnection({ compress: useCompression, bulk: false }).then((conn) => { - conn - .batch('INSERT INTO `blabla` values (?,?)', [ - [1, 2], - [1, undefined] - ]) - .then((res) => { - conn.end(); - done(new Error('expect an error !')); - }) - .catch((err) => { - assert.isTrue( - err.message.includes('Parameter at position 2 is undefined for values 1'), - err.message - ); - conn.end(); - done(); - }); - }); + const conn = await base.createConnection({ compress: useCompression, bulk: false }); + try { + await conn + .batch('INSERT INTO `blabla` values (?,?)', [ + [1, 2], + [1, undefined] + ]); + conn.end(); + throw new Error('expect an error !'); + } catch (err) { + assert.isTrue( + err.message.includes('Parameter at position 2 is undefined for values 1'), + err.message + ); + conn.end(); + } }); - it('simple batch offset date', function (done) { + it('simple batch offset date', async function () { if (!base.utf8Collation()) this.skip(); this.timeout(30000); if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); - simpleBatch(useCompression, false, timezoneParam, done); + await simpleBatch(useCompression, false, timezoneParam); }); - it('simple batch error message ', function (done) { + it('simple batch error message ', async function () { this.timeout(30000); - simpleBatchErrorMsg(useCompression, false, done); + await simpleBatchErrorMsg(useCompression, false); }); - it('simple batch error message truncated', function (done) { + it('simple batch error message truncated', async function () { this.timeout(30000); - displayError(80, done); + await displayError(80); }); - it('simple batch error message super truncated', function (done) { + it('simple batch error message super truncated', async function () { this.timeout(30000); - displayError(50, done); + await displayError(50); }); - const displayError = (debugLen, done) => { - 
base - .createConnection({ trace: true, bulk: false, debugLen: debugLen }) - .then((conn) => { - const timeout = setTimeout(() => { - console.log(conn.info.getLastPackets()); - }, 25000); - conn + const displayError = async (debugLen) => { + const conn = await base.createConnection({ trace: true, bulk: false, debugLen: debugLen }); + const timeout = setTimeout(() => { + console.log(conn.info.getLastPackets()); + }, 25000); + try { + await conn .batch('INSERT INTO simpleBatchErrorMsg values (1, ?, 2, ?, 3)', [ [1, 'john"'], [2, 'jac"k'] - ]) - .then(() => { - done(new Error('must have thrown error !')); - }) - .catch((err) => { - assert.isTrue(err != null); - assert.isTrue(err.message.includes(" doesn't exist")); - const expectedMsg = - debugLen === 80 - ? "INSERT INTO simpleBatchErrorMsg values (1, ?, 2, ?, 3) - parameters:[[1,'jo...]" - : 'INSERT INTO simpleBatchErrorMsg values (1, ?, 2, ?...'; - assert.isTrue(err.message.includes(expectedMsg)); - assert.equal(err.errno, 1146); - assert.equal(err.sqlState, '42S02'); - assert.equal(err.code, 'ER_NO_SUCH_TABLE'); - conn.end(); - clearTimeout(timeout); - done(); - }); - }) - .catch(done); + ]); + throw new Error('must have thrown error !'); + } catch (err) { + assert.isTrue(err != null); + assert.isTrue(err.message.includes(" doesn't exist")); + const expectedMsg = + debugLen === 80 + ? "INSERT INTO simpleBatchErrorMsg values (1, ?, 2, ?, 3) - parameters:[[1,'jo...]" + : 'INSERT INTO simpleBatchErrorMsg values (1, ?, 2, ?...'; + assert.isTrue(err.message.includes(expectedMsg)); + assert.equal(err.errno, 1146); + assert.equal(err.sqlState, '42S02'); + assert.equal(err.code, 'ER_NO_SUCH_TABLE'); + conn.end(); + clearTimeout(timeout); + } }; - it('non rewritable batch', function (done) { + it('non rewritable batch', async function () { this.timeout(30000); - nonRewritableBatch(useCompression, false, done); + await nonRewritableBatch(useCompression, false); }); - it('16M+ batch with 16M max_allowed_packet', function (done) { + it('16M+ batch with 16M max_allowed_packet', async function () { if (!process.env.RUN_LONG_TEST) this.skip(); if (maxAllowedSize <= testSize) this.skip(); this.timeout(360000); - bigBatchWith16mMaxAllowedPacket(useCompression, false, done); + await bigBatchWith16mMaxAllowedPacket(useCompression, false); }); - it('16M+ batch with max_allowed_packet set to 4M', function (done) { + it('16M+ batch with max_allowed_packet set to 4M', async function () { if (!process.env.RUN_LONG_TEST) this.skip(); if (maxAllowedSize <= 4 * 1024 * 1024) this.skip(); this.timeout(360000); - bigBatchWith4mMaxAllowedPacket(useCompression, false, done); + await bigBatchWith4mMaxAllowedPacket(useCompression, false); }); - it('16M+ error batch', function (done) { + it('16M+ error batch', async function () { if (!process.env.RUN_LONG_TEST) this.skip(); if (maxAllowedSize <= testSize) this.skip(); this.timeout(360000); - bigBatchError(useCompression, false, done); + await bigBatchError(useCompression, false); }); - it('16M+ single insert batch with no maxAllowedPacket set', function (done) { + it('16M+ single insert batch with no maxAllowedPacket set', async function () { if (!process.env.RUN_LONG_TEST) this.skip(); if (maxAllowedSize <= testSize) this.skip(); this.timeout(360000); - singleBigInsertWithoutMaxAllowedPacket(useCompression, false, done); + await singleBigInsertWithoutMaxAllowedPacket(useCompression, false); }); - it('batch with streams', function (done) { + it('batch with streams', async function () { if (!base.utf8Collation()) 
this.skip(); this.timeout(30000); - batchWithStream(useCompression, false, done); + await batchWithStream(useCompression, false); }); - it('batch error with streams', function (done) { + it('batch error with streams', async function () { this.timeout(30000); - batchErrorWithStream(useCompression, false, done); + await batchErrorWithStream(useCompression, false); }); - it('16M+ batch with streams', function (done) { + it('16M+ batch with streams', async function () { if (!process.env.RUN_LONG_TEST) this.skip(); if (maxAllowedSize <= testSize) this.skip(); this.timeout(360000); - bigBatchWithStreams(useCompression, false, done); + await bigBatchWithStreams(useCompression, false); }); - it('16M+ error batch with streams', function (done) { + it('16M+ error batch with streams', async function () { if (!process.env.RUN_LONG_TEST) this.skip(); if (maxAllowedSize <= testSize) this.skip(); this.timeout(360000); - bigBatchErrorWithStreams(useCompression, false, done); + await bigBatchErrorWithStreams(useCompression, false); }); }); describe('standard question mark and compress with rewrite', () => { const useCompression = true; - it('simple batch, local date', function (done) { + it('simple batch, local date', async function () { if (!base.utf8Collation()) { this.skip(); return; } this.timeout(30000); if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); - simpleBatch(useCompression, false, 'local', done); + await simpleBatch(useCompression, false, 'local'); }); - it('simple batch offset date', function (done) { + it('simple batch offset date', async function () { if (!base.utf8Collation()) { this.skip(); return; } this.timeout(30000); if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); - simpleBatch(useCompression, false, timezoneParam, done); + await simpleBatch(useCompression, false, timezoneParam); }); - it('simple batch error message ', function (done) { + it('simple batch error message ', async function () { this.timeout(30000); - simpleBatchErrorMsg(useCompression, false, done); + await simpleBatchErrorMsg(useCompression, false); }); - it('batch without value', function (done) { + it('batch without value', async function () { this.timeout(30000); - noValueBatch(useCompression, false, done); + await noValueBatch(useCompression, false); }); - it('simple batch error message truncated', function (done) { + it('simple batch error message truncated', async function () { this.timeout(30000); - displayError(80, done); + await displayError(80); }); - it('simple batch error message super truncated', function (done) { + it('simple batch error message super truncated', async function () { this.timeout(30000); - displayError(50, done); + await displayError(50); }); - const displayError = (debugLen, done) => { - base - .createConnection({ trace: true, bulk: false, debugLen: debugLen }) - .then((conn) => { - const timeout = setTimeout(() => { - console.log(conn.info.getLastPackets()); - }, 25000); - conn - .batch('INSERT INTO simpleBatchErrorMsg values (1, ?, 2, ?, 3)', [ + const displayError = async (debugLen) => { + const conn = await base.createConnection({ trace: true, bulk: false, debugLen: debugLen }); + const timeout = setTimeout(() => { + console.log(conn.info.getLastPackets()); + }, 25000); + try { + await conn.batch('INSERT INTO simpleBatchErrorMsg values (1, ?, 2, ?, 3)', [ [1, 'john"'], [2, 'jac"k'] - ]) - .then(() => { - done(new Error('must have thrown error !')); - }) - .catch((err) => { - assert.isTrue(err != null); - 
assert.isTrue(err.message.includes(" doesn't exist")); - const expectedMsg = - debugLen === 80 - ? "INSERT INTO simpleBatchErrorMsg values (1, ?, 2, ?, 3) - parameters:[[1,'jo...]" - : 'INSERT INTO simpleBatchErrorMsg values (1, ?, 2, ?...'; - assert.isTrue(err.message.includes(expectedMsg)); - assert.equal(err.errno, 1146); - assert.equal(err.sqlState, '42S02'); - assert.equal(err.code, 'ER_NO_SUCH_TABLE'); - conn.end(); - clearTimeout(timeout); - done(); - }); - }) - .catch(done); + ]); + throw new Error('must have thrown error !'); + } catch (err) { + assert.isTrue(err != null); + assert.isTrue(err.message.includes(" doesn't exist")); + const expectedMsg = + debugLen === 80 + ? "INSERT INTO simpleBatchErrorMsg values (1, ?, 2, ?, 3) - parameters:[[1,'jo...]" + : 'INSERT INTO simpleBatchErrorMsg values (1, ?, 2, ?...'; + assert.isTrue(err.message.includes(expectedMsg)); + assert.equal(err.errno, 1146); + assert.equal(err.sqlState, '42S02'); + assert.equal(err.code, 'ER_NO_SUCH_TABLE'); + conn.end(); + clearTimeout(timeout); + } }; - it('non rewritable batch', function (done) { + it('non rewritable batch', async function () { this.timeout(30000); - nonRewritableBatch(useCompression, false, done); + await nonRewritableBatch(useCompression, false); }); - it('16M+ batch with 16M max_allowed_packet', function (done) { + it('16M+ batch with 16M max_allowed_packet', async function () { if (!process.env.RUN_LONG_TEST || maxAllowedSize <= testSize) { this.skip(); return; } this.timeout(360000); - bigBatchWith16mMaxAllowedPacket(useCompression, false, done); + await bigBatchWith16mMaxAllowedPacket(useCompression, false); }); - it('16M+ batch with max_allowed_packet set to 4M', function (done) { + it('16M+ batch with max_allowed_packet set to 4M', async function () { if (!process.env.RUN_LONG_TEST || maxAllowedSize <= 4 * 1024 * 1024) { this.skip(); return; } this.timeout(360000); - bigBatchWith4mMaxAllowedPacket(useCompression, false, done); + await bigBatchWith4mMaxAllowedPacket(useCompression, false); }); - it('16M+ error batch', function (done) { + it('16M+ error batch', async function () { if (!process.env.RUN_LONG_TEST || maxAllowedSize <= testSize) { this.skip(); return; } this.timeout(360000); - bigBatchError(useCompression, false, done); + await bigBatchError(useCompression, false); }); - it('16M+ single insert batch with no maxAllowedPacket set', function (done) { + it('16M+ single insert batch with no maxAllowedPacket set', async function () { if (!process.env.RUN_LONG_TEST || maxAllowedSize <= testSize) { this.skip(); return; } this.timeout(360000); - singleBigInsertWithoutMaxAllowedPacket(useCompression, false, done); + await singleBigInsertWithoutMaxAllowedPacket(useCompression, false); }); - it('batch with streams', function (done) { + it('batch with streams', async function () { if (!base.utf8Collation()) { this.skip(); return; } this.timeout(30000); - batchWithStream(useCompression, false, done); + await batchWithStream(useCompression, false); }); - it('batch error with streams', function (done) { + it('batch error with streams', async function () { this.timeout(30000); - batchErrorWithStream(useCompression, false, done); + await batchErrorWithStream(useCompression, false); }); - it('16M+ batch with streams', function (done) { + it('16M+ batch with streams', async function () { if (!process.env.RUN_LONG_TEST || maxAllowedSize <= testSize) { this.skip(); return; } this.timeout(360000); - bigBatchWithStreams(useCompression, false, done); + await bigBatchWithStreams(useCompression, 
false); }); - it('16M+ error batch with streams', function (done) { + it('16M+ error batch with streams', async function () { if (!process.env.RUN_LONG_TEST || maxAllowedSize <= testSize) { this.skip(); return; } this.timeout(360000); - bigBatchErrorWithStreams(useCompression, false, done); + await bigBatchErrorWithStreams(useCompression, false); }); }); describe('named parameter with bulk', () => { - it('simple batch', function (done) { - if (process.env.SKYSQL) this.skip(); + it('simple batch', async function () { + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); this.timeout(30000); - simpleNamedPlaceHolders(true, done); + await simpleNamedPlaceHolders(true); }); - it('simple batch error', function (done) { - if (process.env.SKYSQL) this.skip(); + it('simple batch error', async function () { + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); this.timeout(30000); - simpleNamedPlaceHoldersErr(true, done); + await simpleNamedPlaceHoldersErr(true); }); - it('non rewritable batch', function (done) { - if (process.env.SKYSQL || !supportBulk) this.skip(); + it('non rewritable batch', async function () { + if (process.env.SKYSQL || process.env.SKYSQL_HA || !supportBulk) this.skip(); this.timeout(30000); - nonRewritableHoldersErr(true, done); + await nonRewritableHoldersErr(true); }); - it('16M+ batch', function (done) { - if (process.env.SKYSQL) this.skip(); + it('16M+ batch', async function () { + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!process.env.RUN_LONG_TEST) this.skip(); if (maxAllowedSize <= testSize) this.skip(); this.timeout(360000); - more16MNamedPlaceHolders(true, done); + await more16MNamedPlaceHolders(true); }); - it('16M+ single insert batch', function (done) { - if (process.env.SKYSQL) this.skip(); + it('16M+ single insert batch', async function () { + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!process.env.RUN_LONG_TEST) this.skip(); if (maxAllowedSize <= testSize) this.skip(); this.timeout(360000); - more16MSingleNamedPlaceHolders(true, done); + await more16MSingleNamedPlaceHolders(true); }); - it('batch with streams', function (done) { - if (process.env.SKYSQL) this.skip(); + it('batch with streams', async function () { + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!base.utf8Collation()) this.skip(); this.timeout(30000); - streamNamedPlaceHolders(true, done); + await streamNamedPlaceHolders(true); }); - it('batch error with streams', function (done) { - if (process.env.SKYSQL) this.skip(); + it('batch error with streams', async function () { + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); this.timeout(30000); - streamErrorNamedPlaceHolders(true, done); + await streamErrorNamedPlaceHolders(true); }); - it('16M+ batch with streams', function (done) { - if (process.env.SKYSQL) this.skip(); + it('16M+ batch with streams', async function () { + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!process.env.RUN_LONG_TEST) this.skip(); if (maxAllowedSize <= testSize) this.skip(); this.timeout(360000); - stream16MNamedPlaceHolders(true, done); + await stream16MNamedPlaceHolders(true); }); }); describe('named parameter with rewrite', () => { - it('simple batch', function (done) { + it('simple batch', async function () { this.timeout(30000); - simpleNamedPlaceHolders(false, done); + await simpleNamedPlaceHolders(false); }); - it('simple batch error', function (done) { + it('simple batch error', async function () { this.timeout(30000); - 
simpleNamedPlaceHoldersErr(false, done); + await simpleNamedPlaceHoldersErr(false); }); - it('non rewritable batch', function (done) { + it('non rewritable batch', async function () { this.timeout(30000); - nonRewritableHoldersErr(false, done); + await nonRewritableHoldersErr(false); }); - it('16M+ batch', function (done) { + it('16M+ batch', async function () { if (!process.env.RUN_LONG_TEST) this.skip(); if (maxAllowedSize <= testSize) this.skip(); this.timeout(360000); - more16MNamedPlaceHolders(false, done); + await more16MNamedPlaceHolders(false); }); - it('16M+ single insert batch', function (done) { + it('16M+ single insert batch', async function () { if (!process.env.RUN_LONG_TEST) this.skip(); if (maxAllowedSize <= testSize) this.skip(); this.timeout(360000); - more16MSingleNamedPlaceHolders(false, done); + await more16MSingleNamedPlaceHolders(false); }); - it('batch with streams', function (done) { + it('batch with streams', async function () { if (!base.utf8Collation()) this.skip(); this.timeout(30000); - streamNamedPlaceHolders(false, done); + await streamNamedPlaceHolders(false); }); - it('batch error with streams', function (done) { + it('batch error with streams', async function () { if (!process.env.RUN_LONG_TEST) this.skip(); this.timeout(30000); - streamErrorNamedPlaceHolders(false, done); + await streamErrorNamedPlaceHolders(false); }); - it('16M+ batch with streams', function (done) { + it('16M+ batch with streams', async function () { if (!process.env.RUN_LONG_TEST) this.skip(); if (maxAllowedSize <= testSize) this.skip(); this.timeout(360000); - stream16MNamedPlaceHolders(false, done); + await stream16MNamedPlaceHolders(false); }); }); }); diff --git a/test/integration/test-big-query.js b/test/integration/test-big-query.js index 493a85ee..614b1017 100644 --- a/test/integration/test-big-query.js +++ b/test/integration/test-big-query.js @@ -23,74 +23,45 @@ describe('Big query', function () { .catch(done); }); - it('parameter bigger than 16M packet size', function (done) { + it('parameter bigger than 16M packet size', async function () { if (maxAllowedSize <= testSize) this.skip(); this.timeout(20000); //can take some time - shareConn - .query('DROP TABLE IF EXISTS bigParameterBigParam') - .then(() => { - return shareConn.query('CREATE TABLE bigParameterBigParam (b longblob)'); - }) - .then(() => { - return shareConn.query('insert into bigParameterBigParam(b) values(?)', [buf]); - }) - .then(() => { - return shareConn.query('SELECT * from bigParameterBigParam'); - }) - .then((rows) => { - assert.deepEqual(rows[0].b, buf); - done(); - }) - .catch(done); + shareConn.query('DROP TABLE IF EXISTS bigParameterBigParam'); + shareConn.query('CREATE TABLE bigParameterBigParam (b longblob)'); + await shareConn.query('FLUSH TABLES'); + shareConn.query('insert into bigParameterBigParam(b) values(?)', [buf]); + const rows = await shareConn.query('SELECT * from bigParameterBigParam'); + assert.deepEqual(rows[0].b, buf); }); - it('int8 buffer overflow', function (done) { + it('int8 buffer overflow', async function () { const buf = Buffer.alloc(979, '0'); - base.createConnection({ collation: 'latin1_swedish_ci' }).then((conn) => { - conn - .query('DROP TABLE IF EXISTS bigParameterInt8') - .then(() => { - return conn.query('CREATE TABLE bigParameterInt8 (a varchar(1024), b varchar(10))'); - }) - .then(() => { - return conn.query('insert into bigParameterInt8 values(?, ?)', [buf.toString(), 'test']); - }) - .then(() => { - return conn.query('SELECT * from bigParameterInt8'); - }) - 
.then((rows) => {
-          assert.deepEqual(rows[0].a, buf.toString());
-          assert.deepEqual(rows[0].b, 'test');
-          conn.end();
-          done();
-        })
-        .catch(done);
-    });
+    const conn = await base.createConnection({ collation: 'latin1_swedish_ci' });
+    conn.query('DROP TABLE IF EXISTS bigParameterInt8');
+    conn.query('CREATE TABLE bigParameterInt8 (a varchar(1024), b varchar(10))');
+    await conn.query('FLUSH TABLE');
+    await conn.query('insert into bigParameterInt8 values(?, ?)', [buf.toString(), 'test']);
+    const rows = await conn.query('SELECT * from bigParameterInt8');
+    assert.deepEqual(rows[0].a, buf.toString());
+    assert.deepEqual(rows[0].b, 'test');
+    conn.end();
   });

-  it('buffer growing', function (done) {
+  it('buffer growing', async function () {
     if (maxAllowedSize <= 11 * 1024 * 1024) this.skip();
     this.timeout(10000); //can take some time
-    base
-      .createConnection()
-      .then((conn) => {
-        bufferGrowing(conn, done);
-      })
-      .catch(done);
+    const conn = await base.createConnection();
+    await bufferGrowing(conn);
   });

-  it('buffer growing compression', function (done) {
+  it('buffer growing compression', async function () {
     if (maxAllowedSize <= 11 * 1024 * 1024) this.skip();
     this.timeout(10000); //can take some time
-    base
-      .createConnection({ compress: true })
-      .then((conn) => {
-        bufferGrowing(conn, done);
-      })
-      .catch(done);
+    const conn = await base.createConnection({ compress: true });
+    await bufferGrowing(conn);
   });

-  function bufferGrowing(conn, done) {
+  async function bufferGrowing(conn) {
     const st = Buffer.alloc(65536, '0').toString();
     const st2 = Buffer.alloc(1048576, '0').toString();
     const params = [st];
@@ -103,27 +74,14 @@ describe('Big query', function () {
     }
     sql += ')';
     sqlInsert += ')';
-    conn
-      .query('DROP TABLE IF EXISTS bigParameter')
-      .then(() => {
-        return conn.query(sql);
-      })
-      .then(() => {
-        return conn.query(sqlInsert, params);
-      })
-      .then(() => {
-        return conn.query('SELECT * from bigParameter');
-      })
-      .then((rows) => {
-        for (let i = 0; i < 10; i++) {
-          assert.deepEqual(rows[0]['a' + i], params[i]);
-        }
-        conn.end();
-        done();
-      })
-      .catch((err) => {
-        conn.end();
-        done(err);
-      });
+    conn.query('DROP TABLE IF EXISTS bigParameter');
+    conn.query(sql);
+    await conn.query('FLUSH TABLES');
+    conn.query(sqlInsert, params);
+    const rows = await conn.query('SELECT * from bigParameter');
+    for (let i = 0; i < 10; i++) {
+      assert.deepEqual(rows[0]['a' + i], params[i]);
+    }
+    conn.end();
   }
 });
diff --git a/test/integration/test-call.js b/test/integration/test-call.js
index b6203cbf..94872772 100644
--- a/test/integration/test-call.js
+++ b/test/integration/test-call.js
@@ -6,7 +6,7 @@ const { assert } = require('chai');

 describe('stored procedure', () => {
   before(function (done) {
-    if (process.env.SKYSQL) this.skip();
+    if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip();
     shareConn
       .query('CREATE PROCEDURE stmtSimple (IN p1 INT, IN p2 INT) begin SELECT p1 + p2 t; end')
       .then(() => {
diff --git a/test/integration/test-change-user.js b/test/integration/test-change-user.js
index f5ad0d19..23eac54a 100644
--- a/test/integration/test-change-user.js
+++ b/test/integration/test-change-user.js
@@ -323,7 +323,8 @@ describe('change user', () => {
           shareConn.info.isMariaDB() &&
           shareConn.info.hasMinVersion(10, 2, 2) &&
           !process.env.MAXSCALE_TEST_DISABLE &&
-          !process.env.SKYSQL
+          !process.env.SKYSQL &&
+          !process.env.SKYSQL_HA
         ) {
           assert.equal(conn.info.database, 'test');
         }
diff --git a/test/integration/test-cluster.js
b/test/integration/test-cluster.js index 9ba54488..471eef1c 100644 --- a/test/integration/test-cluster.js +++ b/test/integration/test-cluster.js @@ -10,29 +10,16 @@ const base = require('../base.js'); const { assert } = require('chai'); describe('cluster', function () { - before(function (done) { - if (process.env.SKYSQL) this.skip(); - shareConn - .query('DROP TABLE IF EXISTS clusterInsert') - .then(() => { - shareConn - .query('CREATE TABLE clusterInsert(id int, nam varchar(256))') - .then(() => { - done(); - }) - .catch(done); - }) - .catch(done); + before(async function () { + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); + await shareConn.query('DROP TABLE IF EXISTS clusterInsert'); + await shareConn.query('CREATE TABLE clusterInsert(id int, nam varchar(256))'); + await shareConn.query('FLUSH TABLES'); }); describe('promise', function () { - beforeEach(function (done) { - shareConn - .query('TRUNCATE TABLE clusterInsert') - .then(() => { - done(); - }) - .catch(done); + beforeEach(async function () { + await shareConn.query('TRUNCATE TABLE clusterInsert'); }); it('no node', function (done) { @@ -436,7 +423,7 @@ describe('cluster', function () { }); it('reusing node after timeout', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); this.timeout(20000); const cl = get3NodeClusterWithProxy({ restoreNodeTimeout: 500 }, basePromise); const poolCluster = cl.cluster; @@ -485,7 +472,7 @@ describe('cluster', function () { }); it('server close connection during query', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (process.env.MAXSCALE_TEST_DISABLE) this.skip(); this.timeout(10000); const poolCluster = basePromise.createPoolCluster({}); @@ -665,20 +652,17 @@ describe('cluster', function () { }); }); - it('batch on filtered', function (done) { + it('batch on filtered', async function () { this.timeout(10000); const poolCluster = get3NodeCluster(); const filteredCluster = poolCluster.of(/^node[12]/); - filteredCluster - .query('DROP TABLE IF EXISTS filteredSimpleBatch') - .then(() => { - return filteredCluster.query( + await filteredCluster.query('DROP TABLE IF EXISTS filteredSimpleBatch'); + await filteredCluster.query( 'CREATE TABLE filteredSimpleBatch(id int not null primary key auto_increment, val int)' ); - }) - .then(() => { - const promises = []; + await filteredCluster.query('FLUSH TABLES'); + const promises = []; for (let i = 0; i < 60; i++) { promises.push( filteredCluster.batch('INSERT INTO filteredSimpleBatch(val) values (?)', [ @@ -688,22 +672,10 @@ describe('cluster', function () { ]) ); } - Promise.all(promises) - .then(() => { - return filteredCluster.query('SELECT count(*) as nb FROM filteredSimpleBatch'); - }) - .then((res) => { - expect(res[0].nb).to.equal(180); - poolCluster.end().then(() => { - done(); - }); - }) - .catch((err) => { - poolCluster.end().then(() => { - done(err); - }); - }); - }); + await Promise.all(promises); + const res = await filteredCluster.query('SELECT count(*) as nb FROM filteredSimpleBatch'); + expect(res[0].nb).to.equal(180); + await poolCluster.end(); }); it('batch error on filtered', function (done) { diff --git a/test/integration/test-compression.js b/test/integration/test-compression.js index a778fe2e..dca49125 100644 --- a/test/integration/test-compression.js +++ b/test/integration/test-compression.js @@ -49,7 +49,7 @@ describe('Compression', function () { }); it('multiple packet 
result (multiple rows)', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); //using sequence engine if (!conn.info.isMariaDB() || !conn.info.hasMinVersion(10, 1)) this.skip(); conn @@ -68,45 +68,25 @@ describe('Compression', function () { .catch(done); }); - it('parameter bigger than 16M packet size', function (done) { + it('parameter bigger than 16M packet size', async function () { if (maxAllowedSize <= testSize) this.skip(); this.timeout(20000); //can take some time - conn - .query('DROP TABLE IF EXISTS bigParameter') - .then(() => { - return conn.query('CREATE TABLE bigParameter (b longblob)'); - }) - .then(() => { - return conn.query('insert into bigParameter(b) values(?)', [buf]); - }) - .then(() => { - return conn.query('SELECT * from bigParameter'); - }) - .then((rows) => { - assert.deepEqual(rows[0].b, buf); - done(); - }) - .catch(done); + conn.query('DROP TABLE IF EXISTS bigParameter'); + conn.query('CREATE TABLE bigParameter (b longblob)'); + await conn.query('FLUSH TABLES'); + conn.query('insert into bigParameter(b) values(?)', [buf]); + const rows = await conn.query('SELECT * from bigParameter'); + assert.deepEqual(rows[0].b, buf); }); - it('multi compression packet size', function (done) { + it('multi compression packet size', async function () { if (maxAllowedSize <= testSize) this.skip(); this.timeout(20000); //can take some time - conn - .query('DROP TABLE IF EXISTS bigParameter2') - .then(() => { - return conn.query('CREATE TABLE bigParameter2 (b longblob)'); - }) - .then(() => { - return conn.query('insert into bigParameter2(b) values(?)', [randomBuf]); - }) - .then(() => { - return conn.query('SELECT * from bigParameter2'); - }) - .then((rows) => { - assert.deepEqual(rows[0].b, randomBuf); - done(); - }) - .catch(done); + conn.query('DROP TABLE IF EXISTS bigParameter2'); + conn.query('CREATE TABLE bigParameter2 (b longblob)'); + await conn.query('FLUSH TABLES'); + conn.query('insert into bigParameter2(b) values(?)', [randomBuf]); + const rows = await conn.query('SELECT * from bigParameter2'); + assert.deepEqual(rows[0].b, randomBuf); }); }); diff --git a/test/integration/test-connection-opts.js b/test/integration/test-connection-opts.js index 62c1f8f2..c68daeb4 100644 --- a/test/integration/test-connection-opts.js +++ b/test/integration/test-connection-opts.js @@ -245,38 +245,22 @@ describe('connection option', () => { .catch(done); }); - it('Server with different tz', function (done) { + it('Server with different tz', async function () { if (process.env.MAXSCALE_TEST_DISABLE) this.skip(); //MySQL 5.5 doesn't have milliseconds if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); - base - .createConnection({ timezone: 'Etc/GMT+5' }) - .then((conn) => { - const now = new Date(); - conn - .query("SET SESSION time_zone = '-05:00'") - .then(() => { - return conn.query('DROP TABLE IF EXISTS t1'); - }) - .then(() => { - return conn.query('CREATE TABLE t1 (a timestamp(6))'); - }) - .then(() => { - return conn.query('INSERT INTO t1 values (?)', now); - }) - .then(() => { - return conn.query('SELECT NOW() as b, t1.a FROM t1'); - }) - .then((res) => { - assert.deepEqual(res[0].a, now); - assert.isOk(Math.abs(res[0].b.getTime() - now.getTime()) < 5000); - conn.end(); - done(); - }) - .catch(done); - }) - .catch(done); + const conn = await base.createConnection({ timezone: 'Etc/GMT+5' }); + const now = new Date(); + conn.query("SET SESSION time_zone = '-05:00'"); + 
conn.query('DROP TABLE IF EXISTS t1'); + conn.query('CREATE TABLE t1 (a timestamp(6))'); + await conn.query('FLUSH TABLES'); + conn.query('INSERT INTO t1 values (?)', now); + const res = await conn.query('SELECT NOW() as b, t1.a FROM t1'); + assert.deepEqual(res[0].a, now); + assert.isOk(Math.abs(res[0].b.getTime() - now.getTime()) < 5000); + conn.end(); }); it('nestTables results boolean', function (done) { @@ -532,7 +516,7 @@ describe('connection option', () => { }); it('connection timeout', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); this.timeout(10000); if (shareConn.info.isMariaDB() && shareConn.info.hasMinVersion(10, 1, 2)) { base @@ -577,7 +561,7 @@ describe('connection option', () => { }); it('connection timeout superseded', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); this.timeout(10000); if (!shareConn.info.isMariaDB() || !shareConn.info.hasMinVersion(10, 1, 2)) this.skip(); base diff --git a/test/integration/test-connection.js b/test/integration/test-connection.js index 45271da2..ac1b4517 100644 --- a/test/integration/test-connection.js +++ b/test/integration/test-connection.js @@ -182,7 +182,8 @@ describe('connection', () => { }); it('connection error event', function (done) { - if (process.env.SKYSQL || process.env.MAXSCALE_TEST_DISABLE) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA || process.env.MAXSCALE_TEST_DISABLE) + this.skip(); if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); base .createConnection() @@ -203,7 +204,7 @@ describe('connection', () => { }); it('connection error event socket failed', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); base .createConnection({ socketTimeout: 100 }) .then((conn) => { @@ -425,7 +426,8 @@ describe('connection', () => { }); it('connection.destroy() during query execution', function (done) { - if (process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL) this.skip(); + if (process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL || process.env.SKYSQL_HA) + this.skip(); this.timeout(10000); base.createConnection().then((conn) => { @@ -592,7 +594,8 @@ describe('connection', () => { (shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(10, 2, 2)) || (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 7, 4)) || process.env.MAXSCALE_TEST_DISABLE || - process.env.SKYSQL + process.env.SKYSQL || + process.env.SKYSQL_HA ) { //session tracking not implemented this.skip(); @@ -775,7 +778,8 @@ describe('connection', () => { ((shareConn.info.isMariaDB() && shareConn.info.hasMinVersion(10, 2)) || (!shareConn.info.isMariaDB() && shareConn.info.hasMinVersion(5, 7))) && !process.env.MAXSCALE_TEST_DISABLE && - !process.env.SKYSQL + !process.env.SKYSQL && + !process.env.SKYSQL_HA ) { //ok packet contain meta change assert.equal(shareConn.info.database, 'changedb'); @@ -850,7 +854,8 @@ describe('connection', () => { !shareConn.info.isMariaDB() || !shareConn.info.hasMinVersion(10, 4, 3) || process.env.MAXSCALE_TEST_DISABLE || - process.env.SKYSQL + process.env.SKYSQL || + process.env.SKYSQL_HA ) { //session tracking not implemented this.skip(); @@ -891,7 +896,8 @@ describe('connection', () => { !shareConn.info.isMariaDB() || !shareConn.info.hasMinVersion(10, 4, 3) || process.env.MAXSCALE_TEST_DISABLE || - process.env.SKYSQL + process.env.SKYSQL || + 
process.env.SKYSQL_HA ) { //session tracking not implemented this.skip(); diff --git a/test/integration/test-debug.js b/test/integration/test-debug.js index 994f9040..9439ae08 100644 --- a/test/integration/test-debug.js +++ b/test/integration/test-debug.js @@ -66,8 +66,9 @@ describe('debug', () => { .then(() => { if ( compress && - process.env.MAXSCALE_TEST_DISABLE == undefined && - process.env.SKYSQL == undefined + !process.env.MAXSCALE_TEST_DISABLE && + !process.env.SKYSQL && + !process.env.SKYSQL_HA ) { conn.debugCompress(true); } else { @@ -78,8 +79,9 @@ describe('debug', () => { .then(() => { if ( compress && - process.env.MAXSCALE_TEST_DISABLE == undefined && - process.env.SKYSQL == undefined + !process.env.MAXSCALE_TEST_DISABLE && + !process.env.SKYSQL && + !process.env.SKYSQL_HA ) { conn.debugCompress(false); } else { @@ -96,14 +98,16 @@ describe('debug', () => { console.log = initialStdOut; const serverVersion = conn.serverVersion(); - if (process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL) compress = false; + if (process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL || process.env.SKYSQL_HA) + compress = false; const rangeWithEOF = compress ? [900, 1200] : [1800, 2400]; const rangeWithoutEOF = compress ? [900, 1200] : [1750, 2000]; if ( ((conn.info.isMariaDB() && conn.info.hasMinVersion(10, 2, 2)) || (!conn.info.isMariaDB() && conn.info.hasMinVersion(5, 7, 5))) && !process.env.MAXSCALE_TEST_DISABLE && - !process.env.SKYSQL + !process.env.SKYSQL && + !process.env.SKYSQL_HA ) { assert( data.length > rangeWithoutEOF[0] && data.length < rangeWithoutEOF[1], @@ -145,7 +149,8 @@ describe('debug', () => { } it('select big request (compressed data) debug', function (done) { - if (process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL) this.skip(); + if (process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL || process.env.SKYSQL_HA) + this.skip(); initialStdOut = console.log; let data = ''; console.log = function () { diff --git a/test/integration/test-error.js b/test/integration/test-error.js index 788cf4e6..252b1df1 100644 --- a/test/integration/test-error.js +++ b/test/integration/test-error.js @@ -374,7 +374,8 @@ describe('Error', () => { it('server close connection - no connection error event', function (done) { this.timeout(20000); - if (process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL) this.skip(); + if (process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL || process.env.SKYSQL_HA) + this.skip(); // Remove Mocha's error listener const originalException = process.listeners('uncaughtException').pop(); process.removeListener('uncaughtException', originalException); @@ -415,7 +416,8 @@ describe('Error', () => { }); it('server close connection during query', function (done) { - if (process.env.SKYSQL || process.env.MAXSCALE_TEST_DISABLE) this.skip(); + if (process.env.SKYSQL || process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL_HA) + this.skip(); this.timeout(20000); base .createConnection() diff --git a/test/integration/test-multi-results.js b/test/integration/test-multi-results.js index ec66e25e..1755afc8 100644 --- a/test/integration/test-multi-results.js +++ b/test/integration/test-multi-results.js @@ -294,7 +294,7 @@ describe('multi-results', () => { }); it('query result with option metaPromiseAsArray multiple', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); base.createConnection({ metaAsArray: true, multipleStatements: true }).then((conn) => { conn .query('select 1; select 2') @@ 
-329,7 +329,7 @@ describe('multi-results', () => { }); it('multiple selects', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); multiStmtConn .query('SELECT 1 as t; SELECT 2 as t2; SELECT 3 as t3') .then((rows) => { @@ -343,7 +343,7 @@ describe('multi-results', () => { }); it('multiple selects with callbacks', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); const callbackConn = base.createCallbackConnection({ multipleStatements: true }); @@ -368,7 +368,7 @@ describe('multi-results', () => { }); it('multiple result type', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); multiStmtConn .query('SELECT 1 as t; DO 1') .then((rows) => { @@ -385,7 +385,7 @@ describe('multi-results', () => { }); it('multiple result type with callback', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); const callbackConn = base.createCallbackConnection({ multipleStatements: true }); @@ -413,7 +413,7 @@ describe('multi-results', () => { }); it('multiple result type with multiple rows', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); //using sequence engine if (!shareConn.info.isMariaDB() || !shareConn.info.hasMinVersion(10, 1)) this.skip(); multiStmtConn @@ -433,7 +433,7 @@ describe('multi-results', () => { }); it('multiple result from procedure', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); shareConn.query('CREATE PROCEDURE myProc () BEGIN SELECT 1; SELECT 2; END'); shareConn .query('call myProc()') diff --git a/test/integration/test-ok-packet.js b/test/integration/test-ok-packet.js index 1f0b5c09..04b4f88d 100644 --- a/test/integration/test-ok-packet.js +++ b/test/integration/test-ok-packet.js @@ -138,7 +138,7 @@ describe('ok packet', () => { }); it('multiple insert result', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); base .createConnection({ multipleStatements: true }) .then((conn) => { diff --git a/test/integration/test-pool-callback-event.js b/test/integration/test-pool-callback-event.js index d771d831..8cac83e8 100644 --- a/test/integration/test-pool-callback-event.js +++ b/test/integration/test-pool-callback-event.js @@ -10,7 +10,7 @@ const os = require('os'); describe('Pool callback event', () => { before(function () { - if (process.env.SKYSQL != null) this.skip(); + if (process.env.SKYSQL != null || process.env.SKYSQL_HA != null) this.skip(); }); it('pool connection creation', function (done) { @@ -47,7 +47,7 @@ describe('Pool callback event', () => { }); it('pool connection enqueue', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); this.timeout(20000); const pool = base.createPoolCallback({ connectionLimit: 2, acquireTimeout: 20000 }); let enqueueNumber = 0; diff --git a/test/integration/test-pool-callback.js b/test/integration/test-pool-callback.js index 7adacd16..4a5249bf 100644 --- a/test/integration/test-pool-callback.js +++ b/test/integration/test-pool-callback.js @@ -6,7 +6,8 @@ const Conf = require('../conf'); describe('Pool callback', () => { before(function () { - if (process.env.SKYSQL || process.env.MAXSCALE_TEST_DISABLE) 
this.skip(); + if (process.env.SKYSQL || process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL_HA) + this.skip(); }); it('pool with wrong authentication', function (done) { @@ -86,7 +87,7 @@ describe('Pool callback', () => { }); it('create pool', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); this.timeout(5000); const pool = base.createPoolCallback({ connectionLimit: 1 }); const initTime = Date.now(); @@ -107,7 +108,7 @@ describe('Pool callback', () => { }); it('create pool with noControlAfterUse', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); this.timeout(5000); const pool = base.createPoolCallback({ connectionLimit: 1, @@ -180,7 +181,7 @@ describe('Pool callback', () => { }); it('pool getConnection timeout', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); const pool = base.createPoolCallback({ connectionLimit: 1, acquireTimeout: 200 @@ -206,7 +207,7 @@ describe('Pool callback', () => { }); it('pool query timeout', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); this.timeout(5000); const pool = base.createPoolCallback({ connectionLimit: 1, @@ -305,7 +306,8 @@ describe('Pool callback', () => { }); it('connection fail handling', function (done) { - if (process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL) this.skip(); + if (process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL || process.env.SKYSQL_HA) + this.skip(); const pool = base.createPoolCallback({ connectionLimit: 2, minDelayValidation: 200 @@ -346,7 +348,8 @@ describe('Pool callback', () => { }); it('query fail handling', function (done) { - if (process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL) this.skip(); + if (process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL || process.env.SKYSQL_HA) + this.skip(); const pool = base.createPoolCallback({ connectionLimit: 2, minDelayValidation: 200 @@ -384,7 +387,7 @@ describe('Pool callback', () => { }); it('connection end', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); const pool = base.createPoolCallback({ connectionLimit: 2 }); setTimeout(() => { //check available connections in pool @@ -621,7 +624,7 @@ describe('Pool callback', () => { }); it('test minimum idle decrease', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); this.timeout(30000); const pool = base.createPoolCallback({ connectionLimit: 10, diff --git a/test/integration/test-pool-event.js b/test/integration/test-pool-event.js index 18bebbc0..f20dd5ef 100644 --- a/test/integration/test-pool-event.js +++ b/test/integration/test-pool-event.js @@ -10,7 +10,7 @@ const os = require('os'); describe('Pool event', () => { before(function () { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); }); it('pool connection creation', function (done) { diff --git a/test/integration/test-pool.js b/test/integration/test-pool.js index 830d8cf8..8b073d18 100644 --- a/test/integration/test-pool.js +++ b/test/integration/test-pool.js @@ -13,7 +13,7 @@ describe('Pool', () => { const fileName = path.join(os.tmpdir(), Math.random() + 'tempStream.txt'); before(function () { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || 
process.env.SKYSQL_HA) this.skip(); }); after(function () { @@ -23,7 +23,7 @@ describe('Pool', () => { }); it('pool metaAsArray', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); const pool = base.createPool({ metaAsArray: true, multipleStatements: true, @@ -172,7 +172,8 @@ describe('Pool', () => { }); it('pool with wrong authentication connection', function (done) { - if (process.env.SKYSQL || process.env.MAXSCALE_TEST_DISABLE) this.skip(); + if (process.env.SKYSQL || process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL_HA) + this.skip(); this.timeout(10000); const pool = base.createPool({ acquireTimeout: 4000, @@ -235,7 +236,8 @@ describe('Pool', () => { }); it('create pool', function (done) { - if (process.env.SKYSQL || process.env.MAXSCALE_TEST_DISABLE) this.skip(); + if (process.env.SKYSQL || process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL_HA) + this.skip(); this.timeout(5000); const pool = base.createPool({ connectionLimit: 1 }); const initTime = Date.now(); @@ -262,7 +264,8 @@ describe('Pool', () => { }); it('create pool with multipleStatement', function (done) { - if (process.env.SKYSQL || process.env.MAXSCALE_TEST_DISABLE) this.skip(); + if (process.env.SKYSQL || process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL_HA) + this.skip(); this.timeout(5000); const pool = base.createPool({ connectionLimit: 5, @@ -384,7 +387,8 @@ describe('Pool', () => { }); it('pool ending during requests', function (done) { - if (process.env.SKYSQL || process.env.MAXSCALE_TEST_DISABLE) this.skip(); + if (process.env.SKYSQL || process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL_HA) + this.skip(); this.timeout(20000); const initial = new Date(); const pool = base.createPool({ connectionLimit: 1 }); @@ -493,7 +497,8 @@ describe('Pool', () => { }); it('pool getConnection timeout', function (done) { - if (process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL) this.skip(); + if (process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL || process.env.SKYSQL_HA) + this.skip(); const pool = base.createPool({ connectionLimit: 1, acquireTimeout: 200 }); let errorThrown = false; pool @@ -517,7 +522,8 @@ describe('Pool', () => { }); it('pool leakDetectionTimeout timeout', function (done) { - if (process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL) this.skip(); + if (process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL || process.env.SKYSQL_HA) + this.skip(); const pool = base.createPool({ connectionLimit: 1, acquireTimeout: 200, @@ -540,7 +546,8 @@ describe('Pool', () => { }); it('pool getConnection timeout recovery', function (done) { - if (process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL) this.skip(); + if (process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL || process.env.SKYSQL_HA) + this.skip(); this.timeout(5000); const pool = base.createPool({ connectionLimit: 10, @@ -592,7 +599,8 @@ describe('Pool', () => { }); it('pool query timeout', function (done) { - if (process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL) this.skip(); + if (process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL || process.env.SKYSQL_HA) + this.skip(); this.timeout(5000); const pool = base.createPool({ connectionLimit: 1, acquireTimeout: 500 }); const initTime = Date.now(); @@ -699,7 +707,8 @@ describe('Pool', () => { }); it('connection fail handling', function (done) { - if (process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL) this.skip(); + if (process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL || 
process.env.SKYSQL_HA) + this.skip(); const pool = base.createPool({ connectionLimit: 2, minDelayValidation: 200 @@ -743,7 +752,8 @@ describe('Pool', () => { }); it('query fail handling', function (done) { - if (process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL) this.skip(); + if (process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL || process.env.SKYSQL_HA) + this.skip(); const pool = base.createPool({ connectionLimit: 2, minDelayValidation: 200 @@ -785,7 +795,8 @@ describe('Pool', () => { }); it('connection end', function (done) { - if (process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL) this.skip(); + if (process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL || process.env.SKYSQL_HA) + this.skip(); const pool = base.createPool({ connectionLimit: 2 }); setTimeout(() => { //check available connections in pool @@ -819,7 +830,8 @@ describe('Pool', () => { }); it('connection release alias', function (done) { - if (process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL) this.skip(); + if (process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL || process.env.SKYSQL_HA) + this.skip(); const pool = base.createPool({ connectionLimit: 2 }); setTimeout(() => { //check available connections in pool @@ -853,7 +865,8 @@ describe('Pool', () => { }); it('connection destroy', function (done) { - if (process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL) this.skip(); + if (process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL || process.env.SKYSQL_HA) + this.skip(); const pool = base.createPool({ connectionLimit: 2 }); setTimeout(() => { //check available connections in pool @@ -1002,7 +1015,8 @@ describe('Pool', () => { }); it("ensure pipe ending doesn't stall connection", function (done) { - if (process.env.SKYSQL || process.env.MAXSCALE_TEST_DISABLE) this.skip(); + if (process.env.SKYSQL || process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL_HA) + this.skip(); //sequence engine only exist in MariaDB if (!shareConn.info.isMariaDB()) this.skip(); const ver = process.version.substring(1).split('.'); @@ -1045,7 +1059,8 @@ describe('Pool', () => { }); it('test minimum idle decrease', function (done) { - if (process.env.SKYSQL || process.env.MAXSCALE_TEST_DISABLE) this.skip(); + if (process.env.SKYSQL || process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL_HA) + this.skip(); this.timeout(30000); const pool = base.createPool({ connectionLimit: 10, @@ -1097,7 +1112,8 @@ describe('Pool', () => { }); it('test minimum idle', function (done) { - if (process.env.SKYSQL || process.env.MAXSCALE_TEST_DISABLE) this.skip(); + if (process.env.SKYSQL || process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL_HA) + this.skip(); this.timeout(5000); const pool = base.createPool({ connectionLimit: 10, @@ -1118,7 +1134,8 @@ describe('Pool', () => { }); it('pool immediate error', function (done) { - if (process.env.SKYSQL || process.env.MAXSCALE_TEST_DISABLE) this.skip(); + if (process.env.SKYSQL || process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL_HA) + this.skip(); const pool = base.createPool({}); pool .getConnection() @@ -1135,7 +1152,8 @@ describe('Pool', () => { }); it('pool server defect timeout', function (done) { - if (process.env.SKYSQL || process.env.MAXSCALE_TEST_DISABLE) this.skip(); + if (process.env.SKYSQL || process.env.MAXSCALE_TEST_DISABLE || process.env.SKYSQL_HA) + this.skip(); this.timeout(5000); const proxy = new Proxy({ port: Conf.baseConfig.port, diff --git a/test/integration/test-ssl.js b/test/integration/test-ssl.js index ab4ecbc3..fcd3f011 100644 --- 
a/test/integration/test-ssl.js +++ b/test/integration/test-ssl.js @@ -282,6 +282,7 @@ describe('ssl', function () { if ( !sslEnable || process.env.SKYSQL || + process.env.SKYSQL_HA || (shareConn.info.isMariaDB() && shareConn.info.hasMinVersion(10, 4, 0)) || (!shareConn.info.isMariaDB() && shareConn.info.hasMinVersion(8, 0, 0)) || shareConn.info.serverVersion.raw.includes('focal') @@ -518,7 +519,7 @@ describe('ssl', function () { }); it('Mutual authentication providing client certificate', function (done) { - if (process.env.SKYSQL) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!sslEnable) this.skip(); if (!ca || !clientKey || !clientCert) this.skip(); if (!base.utf8Collation()) this.skip(); @@ -543,7 +544,7 @@ describe('ssl', function () { }); it('Mutual authentication providing client keystore', function (done) { - if (process.env.SKYSQL != null) this.skip(); + if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!sslEnable) this.skip(); if (!ca || !clientKeystore) this.skip(); if (!base.utf8Collation()) this.skip(); From 6d6e069eaad89444e009c504367385b00d27b4ae Mon Sep 17 00:00:00 2001 From: rusher Date: Tue, 24 Nov 2020 13:06:42 +0100 Subject: [PATCH 10/21] [CONJS-155] Support for node.js 10.13-10.19 BigInt values are now decoded by the connector itself, no longer relying internally on the node.js-specific readBigInt64LE API, which is only available from node.js 10.20 on. --- lib/io/packet.js | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/lib/io/packet.js b/lib/io/packet.js index ddcb5d2f..d6ad9548 100644 --- a/lib/io/packet.js +++ b/lib/io/packet.js @@ -184,9 +184,22 @@ class Packet { } readInt64() { - const val = this.buf.readBigInt64LE(this.pos); + // could use readBigInt64LE when support would be 10.20+ + const val = + this.buf[this.pos + 4] + + this.buf[this.pos + 5] * 2 ** 8 + + this.buf[this.pos + 6] * 2 ** 16 + + (this.buf[this.pos + 7] << 24); + const vv = + (BigInt(val) << 32n) + + BigInt( + this.buf[this.pos] + + this.buf[this.pos + 1] * 2 ** 8 + + this.buf[this.pos + 2] * 2 ** 16 + + this.buf[this.pos + 3] * 2 ** 24 + ); this.pos += 8; - return val; + return vv; } readUnsignedLength() { From fc69120138147352d942e88d886f3fa98826a0d2 Mon Sep 17 00:00:00 2001 From: rusher Date: Tue, 24 Nov 2020 15:23:08 +0100 Subject: [PATCH 11/21] [misc] improving test reliability for unix env. --- test/integration/test-local-infile.js | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/integration/test-local-infile.js b/test/integration/test-local-infile.js index 3b9da050..db28e2ee 100644 --- a/test/integration/test-local-infile.js +++ b/test/integration/test-local-infile.js @@ -295,7 +295,8 @@ describe('local-infile', () => { }) .then(() => { conn.end(); - done('must have thrown error'); + // expected result is to throw error, but super user might still read file.
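The readInt64 rewrite in patch 10 above splits the eight bytes into two 32-bit halves and recombines them with BigInt arithmetic, so only the top byte needs signed handling. A minimal self-contained sketch of the same idea, assuming only BigInt support (node.js 10.4+); the helper name and the comparison snippet are illustrative, not connector API:

```js
// Sketch: decode a signed 64-bit little-endian integer without readBigInt64LE.
function readInt64LE(buf, pos) {
  // high half: the top byte is combined with << so its sign bit propagates,
  // turning the 32-bit half negative for negative two's-complement values
  const high =
    buf[pos + 4] +
    buf[pos + 5] * 2 ** 8 +
    buf[pos + 6] * 2 ** 16 +
    (buf[pos + 7] << 24);
  // low half is plain unsigned arithmetic
  const low =
    buf[pos] + buf[pos + 1] * 2 ** 8 + buf[pos + 2] * 2 ** 16 + buf[pos + 3] * 2 ** 24;
  return (BigInt(high) << 32n) + BigInt(low);
}

const b = Buffer.alloc(8);
b.writeInt32LE(-2, 4); // high half = -2, so the value is -2 * 2 ** 32
console.log(readInt64LE(b, 0)); // -8589934592n
if (Buffer.prototype.readBigInt64LE) {
  console.log(b.readBigInt64LE(0)); // same result on node.js 10.20+
}
```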
+ done(); }) .catch((err) => { assert.equal(err.sqlState, '22000'); From 66a32dfc4fc38395dabc01350d5fc5de7d62f001 Mon Sep 17 00:00:00 2001 From: rusher Date: Tue, 24 Nov 2020 16:23:08 +0100 Subject: [PATCH 12/21] [misc] code style correction --- test/integration/test-batch-callback.js | 190 +++-- test/integration/test-batch-geometry-type.js | 438 +++++----- test/integration/test-batch.js | 790 ++++++++++--------- test/integration/test-cluster.js | 18 +- test/integration/test-local-infile.js | 2 +- 5 files changed, 717 insertions(+), 721 deletions(-) diff --git a/test/integration/test-batch-callback.js b/test/integration/test-batch-callback.js index 24b4b3dd..8ace2e4f 100644 --- a/test/integration/test-batch-callback.js +++ b/test/integration/test-batch-callback.js @@ -391,65 +391,65 @@ describe('batch callback', () => { ); conn.query('FLUSH TABLES', (err) => { conn.batch( - 'INSERT INTO `simpleBatch` values (1, ?, 2, ?, ?, ?, ?, 3)', + 'INSERT INTO `simpleBatch` values (1, ?, 2, ?, ?, ?, ?, 3)', + [ [ - [ - true, - 'john', - new Date('2001-12-31 23:59:58'), - new Date('2018-01-01 12:30:20.456789'), - { - type: 'Point', - coordinates: [10, 10] - } - ], - [ - false, - '12345678901', - null, - new Date('2018-01-21 11:30:20.123456'), - { - type: 'Point', - coordinates: [10, 20] - } - ], - [ - 0, - null, - new Date('2020-12-31 23:59:59'), - new Date('2018-01-21 11:30:20.123456'), - { - type: 'Point', - coordinates: [20, 20] - } - ] + true, + 'john', + new Date('2001-12-31 23:59:58'), + new Date('2018-01-01 12:30:20.456789'), + { + type: 'Point', + coordinates: [10, 10] + } ], - (err, res) => { - if (err) { - assert.isTrue( - err.message.includes("Data too long for column 't' at row 2"), - err.message - ); - conn.query('DROP TABLE simpleBatch', (err, res) => { - clearTimeout(timeout); - conn.end(() => { - done(); - }); - }); - } else { + [ + false, + '12345678901', + null, + new Date('2018-01-21 11:30:20.123456'), + { + type: 'Point', + coordinates: [10, 20] + } + ], + [ + 0, + null, + new Date('2020-12-31 23:59:59'), + new Date('2018-01-21 11:30:20.123456'), + { + type: 'Point', + coordinates: [20, 20] + } + ] + ], + (err, res) => { + if (err) { + assert.isTrue( + err.message.includes("Data too long for column 't' at row 2"), + err.message + ); + conn.query('DROP TABLE simpleBatch', (err, res) => { + clearTimeout(timeout); conn.end(() => { - if ( - (shareConn.info.isMariaDB() && shareConn.info.hasMinVersion(10, 2, 0)) || - (!shareConn.info.isMariaDB() && shareConn.info.hasMinVersion(5, 7, 0)) - ) { - //field truncated must have thrown error - done(new Error('must have throw error !')); - } else { - done(); - } + done(); }); - } + }); + } else { + conn.end(() => { + if ( + (shareConn.info.isMariaDB() && shareConn.info.hasMinVersion(10, 2, 0)) || + (!shareConn.info.isMariaDB() && shareConn.info.hasMinVersion(5, 7, 0)) + ) { + //field truncated must have thrown error + done(new Error('must have throw error !')); + } else { + done(); + } + }); } + } ); conn.query('select 1', (err, rows) => { if (err) { @@ -458,9 +458,8 @@ describe('batch callback', () => { }); } assert.deepEqual(rows, [{ 1: 1 }]); - }); + }); }); - }); }; @@ -577,54 +576,53 @@ describe('batch callback', () => { conn.query( 'CREATE TABLE batchWithStream(id int, id2 int, id3 int, t varchar(128), id4 int, id5 int) CHARSET utf8mb4' ); - conn.query('FLUSH TABLES', err => { + conn.query('FLUSH TABLES', (err) => { conn.batch( - 'INSERT INTO `batchWithStream` values (1, ?, 2, ?, ?, 3)', - [ - [1, stream1, 99], - [2, stream2, 98] - ], - 
(err, res) => { + 'INSERT INTO `batchWithStream` values (1, ?, 2, ?, ?, 3)', + [ + [1, stream1, 99], + [2, stream2, 98] + ], + (err, res) => { + if (err) { + return conn.end(() => { + done(err); + }); + } + assert.equal(res.affectedRows, 2); + conn.query('select * from `batchWithStream`', (err, res) => { if (err) { return conn.end(() => { done(err); }); } - assert.equal(res.affectedRows, 2); - conn.query('select * from `batchWithStream`', (err, res) => { - if (err) { - return conn.end(() => { - done(err); - }); + assert.deepEqual(res, [ + { + id: 1, + id2: 1, + id3: 2, + t: 'abcdefghijkflmnopqrtuvwxyz🤘💪', + id4: 99, + id5: 3 + }, + { + id: 1, + id2: 2, + id3: 2, + t: 'abcdefghijkflmnopqrtuvwxyz🤘💪', + id4: 98, + id5: 3 } - assert.deepEqual(res, [ - { - id: 1, - id2: 1, - id3: 2, - t: 'abcdefghijkflmnopqrtuvwxyz🤘💪', - id4: 99, - id5: 3 - }, - { - id: 1, - id2: 2, - id3: 2, - t: 'abcdefghijkflmnopqrtuvwxyz🤘💪', - id4: 98, - id5: 3 - } - ]); - conn.query('DROP TABLE batchWithStream'); - clearTimeout(timeout); - conn.end(() => { - done(); - }); + ]); + conn.query('DROP TABLE batchWithStream'); + clearTimeout(timeout); + conn.end(() => { + done(); }); - } + }); + } ); }); - }); }; @@ -687,7 +685,7 @@ describe('batch callback', () => { conn.query( 'CREATE TABLE simpleNamedPlaceHolders(id int, id2 int, id3 int, t varchar(128), id4 int) CHARSET utf8mb4' ); - conn.query('FLUSH TABLES', err => { + conn.query('FLUSH TABLES', (err) => { conn.batch( 'INSERT INTO `simpleNamedPlaceHolders` values (1, :param_1, 2, :param_2, 3)', [ @@ -854,7 +852,7 @@ describe('batch callback', () => { conn.query( 'CREATE TABLE streamNamedPlaceHolders(id int, id2 int, id3 int, t varchar(128), id4 int, id5 int) CHARSET utf8mb4' ); - conn.query('FLUSH TABLES', err => { + conn.query('FLUSH TABLES', (err) => { conn.batch( 'INSERT INTO `streamNamedPlaceHolders` values (1, :id1, 2, :id3, :id7, 3)', [ diff --git a/test/integration/test-batch-geometry-type.js b/test/integration/test-batch-geometry-type.js index 50a31289..5fab0bf7 100644 --- a/test/integration/test-batch-geometry-type.js +++ b/test/integration/test-batch-geometry-type.js @@ -120,9 +120,9 @@ describe('batch geometry type', () => { .then(() => { return shareConn.query('CREATE TABLE gis_line_batch (g LINESTRING)'); }) - .then(() => { - return shareConn.query('FLUSH TABLES'); - }) + .then(() => { + return shareConn.query('FLUSH TABLES'); + }) .then(() => { return shareConn.batch('INSERT INTO gis_line_batch VALUES (?)', [ [ @@ -231,10 +231,10 @@ describe('batch geometry type', () => { .then(() => { return shareConn.query('CREATE TABLE gis_polygon_batch (g POLYGON)'); }) - .then(() => { - return shareConn.query('FLUSH TABLES'); - }) - + .then(() => { + return shareConn.query('FLUSH TABLES'); + }) + .then(() => { return shareConn.batch('INSERT INTO gis_polygon_batch VALUES (?)', [ [ @@ -421,10 +421,10 @@ describe('batch geometry type', () => { .then(() => { return shareConn.query('CREATE TABLE gis_multi_point_batch (g MULTIPOINT)'); }) - .then(() => { - return shareConn.query('FLUSH TABLES'); - }) - + .then(() => { + return shareConn.query('FLUSH TABLES'); + }) + .then(() => { return shareConn.batch('INSERT INTO gis_multi_point_batch VALUES (?)', [ [ @@ -526,10 +526,10 @@ describe('batch geometry type', () => { .then(() => { return shareConn.query('CREATE TABLE gis_multi_line_batch (g MULTILINESTRING)'); }) - .then(() => { - return shareConn.query('FLUSH TABLES'); - }) - + .then(() => { + return shareConn.query('FLUSH TABLES'); + }) + .then(() => { return 
shareConn.batch('INSERT INTO gis_multi_line_batch VALUES (?)', [ [ @@ -687,10 +687,10 @@ describe('batch geometry type', () => { .then(() => { return shareConn.query('CREATE TABLE gis_multi_polygon_batch (g MULTIPOLYGON)'); }) - .then(() => { - return shareConn.query('FLUSH TABLES'); - }) - + .then(() => { + return shareConn.query('FLUSH TABLES'); + }) + .then(() => { return shareConn.batch('INSERT INTO gis_multi_polygon_batch VALUES (?)', [ [ @@ -959,213 +959,213 @@ describe('batch geometry type', () => { conn.query('CREATE TABLE gis_geometrycollection_batch (g GEOMETRYCOLLECTION)'); await shareConn.query('FLUSH TABLES'); await conn.batch('INSERT INTO gis_geometrycollection_batch VALUES (?)', [ - [ - { - type: 'GeometryCollection', - geometries: [ - { - type: 'Point', - coordinates: [10, 10] - }, - { - type: 'LineString', - coordinates: [ - [0, 0], - [0, 10], - [10, 0] - ] - }, - { - type: 'MultiPoint', - coordinates: [ - [0, 0], - [10, 10], - [10, 20], - [20, 20] - ] - }, - { - type: 'MultiLineString', - coordinates: [ - [ - [10, 48], - [10, 21], - [10, 0] - ], - [ - [16, 0], - [16, 23], - [16, 48] - ] - ] - }, - { - type: 'MultiPolygon', - coordinates: [ - [ - [ - [28, 26], - [28, 0], - [84, 0], - [84, 42], - [28, 26] - ], - [ - [52, 18], - [66, 23], - [73, 9], - [48, 6], - [52, 18] - ] - ], - [ - [ - [59, 18], - [67, 18], - [67, 13], - [59, 13], - [59, 18] - ] - ] - ] - } - ] - } - ], - [ - { - type: 'GeometryCollection', - geometries: [ - { - type: 'Point', - coordinates: [10, 20] - } - ] - } - ], - [ - { - type: 'GeometryCollection', - geometries: [{}] - } - ], - [ - { - type: 'GeometryCollection', - geometries: [] - } - ], - [ - { - type: 'GeometryCollection' - } + [ + { + type: 'GeometryCollection', + geometries: [ + { + type: 'Point', + coordinates: [10, 10] + }, + { + type: 'LineString', + coordinates: [ + [0, 0], + [0, 10], + [10, 0] ] - ]); - const rows = await conn.query('SELECT * FROM gis_geometrycollection_batch'); - assert.deepEqual(rows, [ - { - g: { - type: 'GeometryCollection', - geometries: [ - { - type: 'Point', - coordinates: [10, 10] - }, - { - type: 'LineString', - coordinates: [ - [0, 0], - [0, 10], - [10, 0] - ] - }, - { - type: 'MultiPoint', - coordinates: [ - [0, 0], - [10, 10], - [10, 20], - [20, 20] - ] - }, - { - type: 'MultiLineString', - coordinates: [ - [ - [10, 48], - [10, 21], - [10, 0] - ], - [ - [16, 0], - [16, 23], - [16, 48] - ] + }, + { + type: 'MultiPoint', + coordinates: [ + [0, 0], + [10, 10], + [10, 20], + [20, 20] + ] + }, + { + type: 'MultiLineString', + coordinates: [ + [ + [10, 48], + [10, 21], + [10, 0] + ], + [ + [16, 0], + [16, 23], + [16, 48] + ] + ] + }, + { + type: 'MultiPolygon', + coordinates: [ + [ + [ + [28, 26], + [28, 0], + [84, 0], + [84, 42], + [28, 26] + ], + [ + [52, 18], + [66, 23], + [73, 9], + [48, 6], + [52, 18] ] - }, - { - type: 'MultiPolygon', - coordinates: [ - [ - [ - [28, 26], - [28, 0], - [84, 0], - [84, 42], - [28, 26] - ], - [ - [52, 18], - [66, 23], - [73, 9], - [48, 6], - [52, 18] - ] - ], - [ - [ - [59, 18], - [67, 18], - [67, 13], - [59, 13], - [59, 18] - ] - ] + ], + [ + [ + [59, 18], + [67, 18], + [67, 13], + [59, 13], + [59, 18] ] - } - ] - } - }, - { - g: { - type: 'GeometryCollection', - geometries: [ - { - type: 'Point', - coordinates: [10, 20] - } + ] ] } - }, - { - g: { - type: 'GeometryCollection', - geometries: [] + ] + } + ], + [ + { + type: 'GeometryCollection', + geometries: [ + { + type: 'Point', + coordinates: [10, 20] } - }, - { - g: { - type: 'GeometryCollection', - geometries: [] + ] + } + 
], + [ + { + type: 'GeometryCollection', + geometries: [{}] + } + ], + [ + { + type: 'GeometryCollection', + geometries: [] + } + ], + [ + { + type: 'GeometryCollection' + } + ] + ]); + const rows = await conn.query('SELECT * FROM gis_geometrycollection_batch'); + assert.deepEqual(rows, [ + { + g: { + type: 'GeometryCollection', + geometries: [ + { + type: 'Point', + coordinates: [10, 10] + }, + { + type: 'LineString', + coordinates: [ + [0, 0], + [0, 10], + [10, 0] + ] + }, + { + type: 'MultiPoint', + coordinates: [ + [0, 0], + [10, 10], + [10, 20], + [20, 20] + ] + }, + { + type: 'MultiLineString', + coordinates: [ + [ + [10, 48], + [10, 21], + [10, 0] + ], + [ + [16, 0], + [16, 23], + [16, 48] + ] + ] + }, + { + type: 'MultiPolygon', + coordinates: [ + [ + [ + [28, 26], + [28, 0], + [84, 0], + [84, 42], + [28, 26] + ], + [ + [52, 18], + [66, 23], + [73, 9], + [48, 6], + [52, 18] + ] + ], + [ + [ + [59, 18], + [67, 18], + [67, 13], + [59, 13], + [59, 18] + ] + ] + ] } - }, - { - g: { - type: 'GeometryCollection', - geometries: [] + ] + } + }, + { + g: { + type: 'GeometryCollection', + geometries: [ + { + type: 'Point', + coordinates: [10, 20] } - } - ]); - conn.end(); + ] + } + }, + { + g: { + type: 'GeometryCollection', + geometries: [] + } + }, + { + g: { + type: 'GeometryCollection', + geometries: [] + } + }, + { + g: { + type: 'GeometryCollection', + geometries: [] + } + } + ]); + conn.end(); }); }); diff --git a/test/integration/test-batch.js b/test/integration/test-batch.js index afbf83ba..60d2d7ef 100644 --- a/test/integration/test-batch.js +++ b/test/integration/test-batch.js @@ -22,7 +22,7 @@ describe('batch', () => { supportBulk = (Conf.baseConfig.bulk === undefined ? true : Conf.baseConfig.bulk) ? (shareConn.info.serverCapabilities & Capabilities.MARIADB_CLIENT_STMT_BULK_OPERATIONS) > 0 : false; - const row = await shareConn.query('SELECT @@max_allowed_packet as t') + const row = await shareConn.query('SELECT @@max_allowed_packet as t'); maxAllowedSize = row[0].t; if (testSize < maxAllowedSize) { bigBuf = Buffer.alloc(testSize); @@ -36,7 +36,7 @@ describe('batch', () => { beforeEach(async function () { //just to ensure shared connection is not closed by server due to inactivity - await shareConn.ping() + await shareConn.ping(); }); after(function () { @@ -47,10 +47,10 @@ describe('batch', () => { const simpleBatch = async (useCompression, useBulk, timezone) => { const conn = await base.createConnection({ - compress: useCompression, - bulk: useBulk, - timezone: timezone - }); + compress: useCompression, + bulk: useBulk, + timezone: timezone + }); const timeout = setTimeout(() => { console.log(conn.info.getLastPackets()); }, 25000); @@ -66,103 +66,103 @@ describe('batch', () => { return 'blabla'; }; let res = await conn.batch('INSERT INTO `simpleBatch` values (1, ?, 2, ?, ?, ?, ?, 3)', [ - [ - true, - 'Ʉjo"h\u000An😎🌶\\\\', - new Date('2001-12-31 23:59:58+3'), - new Date('2018-01-01 12:30:20.456789+3'), - { - type: 'Point', - coordinates: [10, 10] - } - ], - [ - true, - f, - new Date('2001-12-31 23:59:58+3'), - new Date('2018-01-01 12:30:20.456789+3'), - { - type: 'Point', - coordinates: [10, 10] - } - ], - [ - false, - { name: 'jack\u000Aमस्', val: 'tt' }, - null, - new Date('2018-01-21 11:30:20.123456+3'), - { - type: 'Point', - coordinates: [10, 20] - } - ], - [ - 0, - null, - new Date('2020-12-31 23:59:59+3'), - new Date('2018-01-21 11:30:20.123456+3'), - { - type: 'Point', - coordinates: [20, 20] - } - ] - ]); - assert.equal(res.affectedRows, 4); - res = await 
conn.query('select * from `simpleBatch`'); - assert.deepEqual(res, [ + [ + true, + 'Ʉjo"h\u000An😎🌶\\\\', + new Date('2001-12-31 23:59:58+3'), + new Date('2018-01-01 12:30:20.456789+3'), { - id: 1, - id2: 1, - id3: 2, - t: 'Ʉjo"h\u000An😎🌶\\\\', - d: new Date('2001-12-31 23:59:58+3'), - d2: new Date('2018-01-01 12:30:20.456789+3'), - g: { - type: 'Point', - coordinates: [10, 10] - }, - id4: 3 - }, + type: 'Point', + coordinates: [10, 10] + } + ], + [ + true, + f, + new Date('2001-12-31 23:59:58+3'), + new Date('2018-01-01 12:30:20.456789+3'), { - id: 1, - id2: 1, - id3: 2, - t: 'blabla', - d: new Date('2001-12-31 23:59:58+3'), - d2: new Date('2018-01-01 12:30:20.456789+3'), - g: { - type: 'Point', - coordinates: [10, 10] - }, - id4: 3 - }, + type: 'Point', + coordinates: [10, 10] + } + ], + [ + false, + { name: 'jack\u000Aमस्', val: 'tt' }, + null, + new Date('2018-01-21 11:30:20.123456+3'), { - id: 1, - id2: 0, - id3: 2, - t: '{"name":"jack\\nमस्","val":"tt"}', - d: null, - d2: new Date('2018-01-21 11:30:20.123456+3'), - g: { - type: 'Point', - coordinates: [10, 20] - }, - id4: 3 - }, + type: 'Point', + coordinates: [10, 20] + } + ], + [ + 0, + null, + new Date('2020-12-31 23:59:59+3'), + new Date('2018-01-21 11:30:20.123456+3'), { - id: 1, - id2: 0, - id3: 2, - t: null, - d: new Date('2020-12-31 23:59:59+3'), - d2: new Date('2018-01-21 11:30:20.123456+3'), - g: { - type: 'Point', - coordinates: [20, 20] - }, - id4: 3 + type: 'Point', + coordinates: [20, 20] } - ]); + ] + ]); + assert.equal(res.affectedRows, 4); + res = await conn.query('select * from `simpleBatch`'); + assert.deepEqual(res, [ + { + id: 1, + id2: 1, + id3: 2, + t: 'Ʉjo"h\u000An😎🌶\\\\', + d: new Date('2001-12-31 23:59:58+3'), + d2: new Date('2018-01-01 12:30:20.456789+3'), + g: { + type: 'Point', + coordinates: [10, 10] + }, + id4: 3 + }, + { + id: 1, + id2: 1, + id3: 2, + t: 'blabla', + d: new Date('2001-12-31 23:59:58+3'), + d2: new Date('2018-01-01 12:30:20.456789+3'), + g: { + type: 'Point', + coordinates: [10, 10] + }, + id4: 3 + }, + { + id: 1, + id2: 0, + id3: 2, + t: '{"name":"jack\\nमस्","val":"tt"}', + d: null, + d2: new Date('2018-01-21 11:30:20.123456+3'), + g: { + type: 'Point', + coordinates: [10, 20] + }, + id4: 3 + }, + { + id: 1, + id2: 0, + id3: 2, + t: null, + d: new Date('2020-12-31 23:59:59+3'), + d2: new Date('2018-01-21 11:30:20.123456+3'), + g: { + type: 'Point', + coordinates: [20, 20] + }, + id4: 3 + } + ]); conn.query('DROP TABLE simpleBatch'); clearTimeout(timeout); @@ -213,12 +213,11 @@ describe('batch', () => { }; const simpleBatchEncodingCP1251 = async (useCompression, useBulk, timezone) => { - const conn = await base - .createConnection({ - compress: useCompression, - bulk: useBulk, - collation: 'CP1251_GENERAL_CI' - }); + const conn = await base.createConnection({ + compress: useCompression, + bulk: useBulk, + collation: 'CP1251_GENERAL_CI' + }); const timeout = setTimeout(() => { console.log(conn.info.getLastPackets()); }, 25000); @@ -247,18 +246,18 @@ describe('batch', () => { console.log(conn.info.getLastPackets()); }, 25000); try { - await conn.batch('INSERT INTO simpleBatchErrorMsg values (1, ?, 2, ?, 3)', [ - [1, 'john'], - [2, 'jack'] - ]); - throw new Error('must have thrown error !'); - } catch(err) { + await conn.batch('INSERT INTO simpleBatchErrorMsg values (1, ?, 2, ?, 3)', [ + [1, 'john'], + [2, 'jack'] + ]); + throw new Error('must have thrown error !'); + } catch (err) { assert.isTrue(err != null); assert.isTrue(err.message.includes(" doesn't exist")); assert.isTrue( - 
err.message.includes( - "INSERT INTO simpleBatchErrorMsg values (1, ?, 2, ?, 3) - parameters:[[1,'john'],[2,'jack']]" - ) + err.message.includes( + "INSERT INTO simpleBatchErrorMsg values (1, ?, 2, ?, 3) - parameters:[[1,'john'],[2,'jack']]" + ) ); assert.equal(err.errno, 1146); assert.equal(err.sqlState, '42S02'); @@ -286,10 +285,10 @@ describe('batch', () => { const simpleBatchErrorSplit = async (useCompression, useBulk, timezone) => { const conn = await base.createConnection({ - compress: useCompression, - bulk: useBulk, - timezone: timezone - }); + compress: useCompression, + bulk: useBulk, + timezone: timezone + }); const timeout = setTimeout(() => { console.log(conn.info.getLastPackets()); }, 25000); @@ -333,19 +332,16 @@ describe('batch', () => { ] ]); if ( - (shareConn.info.isMariaDB() && shareConn.info.hasMinVersion(10, 2, 0)) || - (!shareConn.info.isMariaDB() && shareConn.info.hasMinVersion(5, 7, 0)) + (shareConn.info.isMariaDB() && shareConn.info.hasMinVersion(10, 2, 0)) || + (!shareConn.info.isMariaDB() && shareConn.info.hasMinVersion(5, 7, 0)) ) { //field truncated must have thrown error throw new Error('must have throw error !'); } } catch (err) { - assert.isTrue( - err.message.includes("Data too long for column 't' at row 2"), - err.message - ); + assert.isTrue(err.message.includes("Data too long for column 't' at row 2"), err.message); } - conn.query('DROP TABLE simpleBatch') + conn.query('DROP TABLE simpleBatch'); conn.end(); clearTimeout(timeout); }; @@ -380,10 +376,10 @@ describe('batch', () => { } catch (err) { if (useBulk & conn.info.isMariaDB() && conn.info.hasMinVersion(10, 2, 7)) { assert.isTrue( - err.message.includes( - 'This command is not supported in the prepared statement protocol yet' - ), - err.message + err.message.includes( + 'This command is not supported in the prepared statement protocol yet' + ), + err.message ); } } @@ -392,13 +388,12 @@ describe('batch', () => { }; const bigBatchWith16mMaxAllowedPacket = async (useCompression, useBulk) => { - const conn = await base - .createConnection({ - compress: useCompression, - maxAllowedPacket: 16 * 1024 * 1024, - bulk: useBulk, - logPackets: true - }); + const conn = await base.createConnection({ + compress: useCompression, + maxAllowedPacket: 16 * 1024 * 1024, + bulk: useBulk, + logPackets: true + }); const timeout = setTimeout(() => { console.log(conn.info.getLastPackets()); }, 200000); @@ -412,43 +407,44 @@ describe('batch', () => { for (let i = 0; i < 1000000; i++) { values.push([i, str]); } - let res = await conn - .batch('INSERT INTO `bigBatchWith16mMaxAllowedPacket` values (1, ?, 2, ?, 3)', values); + let res = await conn.batch( + 'INSERT INTO `bigBatchWith16mMaxAllowedPacket` values (1, ?, 2, ?, 3)', + values + ); assert.equal(res.affectedRows, 1000000); let currRow = 0; return new Promise(function (resolve, reject) { conn - .queryStream('select * from `bigBatchWith16mMaxAllowedPacket`') - .on('error', (err) => { - reject(new Error('must not have thrown any error !')); - }) - .on('data', (row) => { - assert.deepEqual(row, { - id: 1, - id2: currRow, - id3: 2, - t: str, - id4: 3 - }); - currRow++; - }) - .on('end', () => { - assert.equal(1000000, currRow); - conn.query('DROP TABLE bigBatchWith16mMaxAllowedPacket'); - clearTimeout(timeout); - conn.end(); - resolve(); + .queryStream('select * from `bigBatchWith16mMaxAllowedPacket`') + .on('error', (err) => { + reject(new Error('must not have thrown any error !')); + }) + .on('data', (row) => { + assert.deepEqual(row, { + id: 1, + id2: currRow, + id3: 2, + 
t: str, + id4: 3 }); + currRow++; + }) + .on('end', () => { + assert.equal(1000000, currRow); + conn.query('DROP TABLE bigBatchWith16mMaxAllowedPacket'); + clearTimeout(timeout); + conn.end(); + resolve(); + }); }); }; const bigBatchWith4mMaxAllowedPacket = async (useCompression, useBulk) => { - const conn = await base - .createConnection({ - compress: useCompression, - bulk: useBulk, - logPackets: true - }); + const conn = await base.createConnection({ + compress: useCompression, + bulk: useBulk, + logPackets: true + }); const timeout = setTimeout(() => { console.log(conn.info.getLastPackets()); }, 200000); @@ -461,42 +457,44 @@ describe('batch', () => { for (let i = 0; i < 1000000; i++) { values.push([i, str]); } - let res = await conn.batch('INSERT INTO `bigBatchWith4mMaxAllowedPacket` values (1, ?, 2, ?, 3)', values); + let res = await conn.batch( + 'INSERT INTO `bigBatchWith4mMaxAllowedPacket` values (1, ?, 2, ?, 3)', + values + ); assert.equal(res.affectedRows, 1000000); let currRow = 0; return new Promise(function (resolve, reject) { conn - .queryStream('select * from `bigBatchWith4mMaxAllowedPacket`') - .on('error', (err) => { - reject(new Error('must not have thrown any error !')); - }) - .on('data', (row) => { - assert.deepEqual(row, { - id: 1, - id2: currRow, - id3: 2, - t: str, - id4: 3 - }); - currRow++; - }) - .on('end', () => { - assert.equal(1000000, currRow); - conn.query('DROP TABLE bigBatchWith4mMaxAllowedPacket'); - clearTimeout(timeout); - conn.end(); - resolve(); + .queryStream('select * from `bigBatchWith4mMaxAllowedPacket`') + .on('error', (err) => { + reject(new Error('must not have thrown any error !')); + }) + .on('data', (row) => { + assert.deepEqual(row, { + id: 1, + id2: currRow, + id3: 2, + t: str, + id4: 3 }); + currRow++; + }) + .on('end', () => { + assert.equal(1000000, currRow); + conn.query('DROP TABLE bigBatchWith4mMaxAllowedPacket'); + clearTimeout(timeout); + conn.end(); + resolve(); + }); }); }; const bigBatchError = async (useCompression, useBulk) => { - const conn = await base - .createConnection({ - compress: useCompression, - bulk: useBulk, - logPackets: true - }); + const conn = await base.createConnection({ + compress: useCompression, + bulk: useBulk, + logPackets: true + }); const timeout = setTimeout(() => { console.log(conn.info.getLastPackets()); }, 200000); @@ -525,11 +523,13 @@ describe('batch', () => { 'CREATE TABLE singleBigInsertWithoutMaxAllowedPacket(id int, id2 int, id3 int, t longtext, id4 int) CHARSET utf8mb4' ); await conn.query('FLUSH TABLE'); - const res = await conn - .batch('INSERT INTO `singleBigInsertWithoutMaxAllowedPacket` values (1, ?, 2, ?, 3)', [ - [1, bigBuf], - [2, 'john'] - ]); + const res = await conn.batch( + 'INSERT INTO `singleBigInsertWithoutMaxAllowedPacket` values (1, ?, 2, ?, 3)', + [ + [1, bigBuf], + [2, 'john'] + ] + ); assert.equal(res.affectedRows, 2); const rows = await conn.query('select * from `singleBigInsertWithoutMaxAllowedPacket`'); assert.deepEqual(rows, [ @@ -556,12 +556,11 @@ describe('batch', () => { const batchWithStream = async (useCompression, useBulk) => { const stream1 = fs.createReadStream(fileName); const stream2 = fs.createReadStream(fileName); - const conn = await base - .createConnection({ - compress: useCompression, - bulk: useBulk, - logPackets: true - }); + const conn = await base.createConnection({ + compress: useCompression, + bulk: useBulk, + logPackets: true + }); const timeout = setTimeout(() => { console.log(conn.info.getLastPackets()); }, 25000); @@ -571,9 +570,9 @@ 
describe('batch', () => { ); await conn.query('FLUSH TABLES'); let res = await conn.batch('INSERT INTO `batchWithStream` values (1, ?, 2, ?, ?, 3)', [ - [1, stream1, 99], - [2, stream2, 98] - ]); + [1, stream1, 99], + [2, stream2, 98] + ]); assert.equal(res.affectedRows, 2); res = await conn.query('select * from `batchWithStream`'); assert.deepEqual(res, [ @@ -602,24 +601,23 @@ describe('batch', () => { const batchErrorWithStream = async (useCompression, useBulk) => { const stream1 = fs.createReadStream(fileName); const stream2 = fs.createReadStream(fileName); - const conn = await base.createConnection({compress: useCompression, bulk: useBulk}); + const conn = await base.createConnection({ compress: useCompression, bulk: useBulk }); const timeout = setTimeout(() => { console.log(conn.info.getLastPackets()); }, 25000); try { - await conn - .batch('INSERT INTO batchErrorWithStream values (1, ?, 2, ?, ?, 3)', [ - [1, stream1, 99], - [2, stream2, 98] - ]); + await conn.batch('INSERT INTO batchErrorWithStream values (1, ?, 2, ?, ?, 3)', [ + [1, stream1, 99], + [2, stream2, 98] + ]); throw new Error('must have thrown error !'); } catch (err) { assert.isTrue(err != null); assert.isTrue(err.message.includes(" doesn't exist")); assert.isTrue( - err.message.includes( - 'sql: INSERT INTO batchErrorWithStream values (1, ?, 2, ?, ?, 3) - parameters:[[1,[object Object],99],[2,[object Object],98]]' - ) + err.message.includes( + 'sql: INSERT INTO batchErrorWithStream values (1, ?, 2, ?, ?, 3) - parameters:[[1,[object Object],99],[2,[object Object],98]]' + ) ); assert.equal(err.errno, 1146); assert.equal(err.sqlState, '42S02'); @@ -635,12 +633,11 @@ describe('batch', () => { if (i % 100000 === 0) values.push([i, fs.createReadStream(fileName), i * 2]); else values.push([i, str, i * 2]); } - const conn = await base - .createConnection({ - compress: useCompression, - bulk: useBulk, - logPackets: true - }); + const conn = await base.createConnection({ + compress: useCompression, + bulk: useBulk, + logPackets: true + }); const timeout = setTimeout(() => { console.log(conn.info.getLastPackets()); }, 200000); @@ -649,34 +646,37 @@ describe('batch', () => { 'CREATE TABLE bigBatchWithStreams(id int, id2 int, id3 int, t varchar(128), id4 int, id5 int) CHARSET utf8mb4' ); await conn.query('FLUSH TABLES'); - let res = await conn.batch('INSERT INTO `bigBatchWithStreams` values (1, ?, 2, ?, ?, 3)', values); + let res = await conn.batch( + 'INSERT INTO `bigBatchWithStreams` values (1, ?, 2, ?, ?, 3)', + values + ); assert.equal(res.affectedRows, 1000000); let currRow = 0; return new Promise(function (resolve, reject) { - conn - .queryStream('select * from `bigBatchWithStreams`') - .on('error', (err) => { - reject(new Error('must not have thrown any error !')); - }) - .on('data', (row) => { - assert.deepEqual(row, { - id: 1, - id2: currRow, - id3: 2, - t: str, - id4: currRow * 2, - id5: 3 - }); - currRow++; - }) - .on('end', () => { - assert.equal(1000000, currRow); - conn.query('DROP TABLE bigBatchWithStreams'); - clearTimeout(timeout); - conn.end(); - resolve(); - }); + conn + .queryStream('select * from `bigBatchWithStreams`') + .on('error', (err) => { + reject(new Error('must not have thrown any error !')); + }) + .on('data', (row) => { + assert.deepEqual(row, { + id: 1, + id2: currRow, + id3: 2, + t: str, + id4: currRow * 2, + id5: 3 }); + currRow++; + }) + .on('end', () => { + assert.equal(1000000, currRow); + conn.query('DROP TABLE bigBatchWithStreams'); + clearTimeout(timeout); + conn.end(); + resolve(); + }); + 
}); }; const bigBatchErrorWithStreams = async (useCompression, useBulk) => { @@ -686,25 +686,23 @@ describe('batch', () => { else values.push([i, str, i * 2]); } - const conn = await base - .createConnection({ - compress: useCompression, - bulk: useBulk, - logPackets: true - }); - const timeout = setTimeout(() => { - console.log(conn.info.getLastPackets()); - }, 200000); - try { - await conn - .batch('INSERT INTO `blabla` values (1, ?, 2, ?, ?, 3)', values); - throw new Error('must have thrown error !'); - } catch (err) { - const rows = await conn.query('select 1'); - assert.deepEqual(rows, [{ 1: 1 }]); - conn.end(); - clearTimeout(timeout); - } + const conn = await base.createConnection({ + compress: useCompression, + bulk: useBulk, + logPackets: true + }); + const timeout = setTimeout(() => { + console.log(conn.info.getLastPackets()); + }, 200000); + try { + await conn.batch('INSERT INTO `blabla` values (1, ?, 2, ?, ?, 3)', values); + throw new Error('must have thrown error !'); + } catch (err) { + const rows = await conn.query('select 1'); + assert.deepEqual(rows, [{ 1: 1 }]); + conn.end(); + clearTimeout(timeout); + } }; const simpleNamedPlaceHolders = async (useBulk) => { @@ -717,28 +715,30 @@ describe('batch', () => { 'CREATE TABLE simpleNamedPlaceHolders(id int, id2 int, id3 int, t varchar(128), id4 int) CHARSET utf8mb4' ); await conn.query('FLUSH TABLES'); - let res = await conn - .batch('INSERT INTO `simpleNamedPlaceHolders` values (1, :param_1, 2, :param_2, 3)', [ + let res = await conn.batch( + 'INSERT INTO `simpleNamedPlaceHolders` values (1, :param_1, 2, :param_2, 3)', + [ { param_1: 1, param_2: 'john' }, { param_1: 2, param_2: 'jack' } - ]); + ] + ); assert.equal(res.affectedRows, 2); res = await conn.query('select * from `simpleNamedPlaceHolders`'); assert.deepEqual(res, [ - { - id: 1, - id2: 1, - id3: 2, - t: 'john', - id4: 3 - }, - { - id: 1, - id2: 2, - id3: 2, - t: 'jack', - id4: 3 - } + { + id: 1, + id2: 1, + id3: 2, + t: 'john', + id4: 3 + }, + { + id: 1, + id2: 2, + id3: 2, + t: 'jack', + id4: 3 + } ]); conn.query('DROP TABLE simpleNamedPlaceHolders'); conn.end(); @@ -751,19 +751,18 @@ describe('batch', () => { console.log(conn.info.getLastPackets()); }, 25000); try { - await conn - .batch('INSERT INTO blabla values (1, :param_1, 2, :param_2, 3)', [ - { param_1: 1, param_2: 'john' }, - { param_1: 2, param_2: 'jack' } - ]); - throw new Error('must have thrown error !'); + await conn.batch('INSERT INTO blabla values (1, :param_1, 2, :param_2, 3)', [ + { param_1: 1, param_2: 'john' }, + { param_1: 2, param_2: 'jack' } + ]); + throw new Error('must have thrown error !'); } catch (err) { assert.isTrue(err != null); assert.isTrue(err.message.includes(" doesn't exist")); assert.isTrue( - err.message.includes( - "sql: INSERT INTO blabla values (1, :param_1, 2, :param_2, 3) - parameters:[{'param_1':1,'param_2':'john'},{'param_1':2,'param_2':'jack'}]" - ) + err.message.includes( + "sql: INSERT INTO blabla values (1, :param_1, 2, :param_2, 3) - parameters:[{'param_1':1,'param_2':'john'},{'param_1':2,'param_2':'jack'}]" + ) ); assert.equal(err.errno, 1146); assert.equal(err.sqlState, '42S02'); @@ -779,11 +778,10 @@ describe('batch', () => { console.log(conn.info.getLastPackets()); }, 25000); try { - const res = await conn - .batch('SELECT :id2 as id, :id1 as t', [ - { id2: 1, id1: 'john' }, - { id1: 'jack', id2: 2 } - ]); + const res = await conn.batch('SELECT :id2 as id, :id1 as t', [ + { id2: 1, id1: 'john' }, + { id1: 'jack', id2: 2 } + ]); if (useBulk & conn.info.isMariaDB() 
&& conn.info.hasMinVersion(10, 2, 7)) { conn.end(); throw new Error('Must have thrown an exception'); @@ -805,9 +803,9 @@ describe('batch', () => { } catch (err) { if (useBulk & conn.info.isMariaDB() && conn.info.hasMinVersion(10, 2, 7)) { assert.isTrue( - err.message.includes( - 'This command is not supported in the prepared statement protocol yet' - ) + err.message.includes( + 'This command is not supported in the prepared statement protocol yet' + ) ); } } @@ -829,38 +827,40 @@ describe('batch', () => { for (let i = 0; i < 1000000; i++) { values.push({ id1: i, id2: str }); } - const res = await conn.batch('INSERT INTO `more16MNamedPlaceHolders` values (1, :id1, 2, :id2, 3)', values); + const res = await conn.batch( + 'INSERT INTO `more16MNamedPlaceHolders` values (1, :id1, 2, :id2, 3)', + values + ); assert.equal(res.affectedRows, 1000000); let currRow = 0; return new Promise(function (resolve, reject) { - conn - .queryStream('select * from `more16MNamedPlaceHolders`') - .on('error', (err) => { - reject(new Error('must not have thrown any error !')); - }) - .on('data', (row) => { - assert.deepEqual(row, { - id: 1, - id2: currRow, - id3: 2, - t: str, - id4: 3 - }); - currRow++; - }) - .on('end', () => { - assert.equal(1000000, currRow); - conn.query('DROP TABLE more16MNamedPlaceHolders'); - clearTimeout(timeout); - conn.end(); - resolve(); - }); + conn + .queryStream('select * from `more16MNamedPlaceHolders`') + .on('error', (err) => { + reject(new Error('must not have thrown any error !')); + }) + .on('data', (row) => { + assert.deepEqual(row, { + id: 1, + id2: currRow, + id3: 2, + t: str, + id4: 3 }); + currRow++; + }) + .on('end', () => { + assert.equal(1000000, currRow); + conn.query('DROP TABLE more16MNamedPlaceHolders'); + clearTimeout(timeout); + conn.end(); + resolve(); + }); + }); }; const more16MSingleNamedPlaceHolders = async function (useBulk) { - const conn = await base - .createConnection({ namedPlaceholders: true, bulk: useBulk }); + const conn = await base.createConnection({ namedPlaceholders: true, bulk: useBulk }); const timeout = setTimeout(() => { console.log(conn.info.getLastPackets()); }, 200000); @@ -869,11 +869,13 @@ describe('batch', () => { 'CREATE TABLE more16MSingleNamedPlaceHolders(id int, id2 int, id3 int, t longtext, id4 int) CHARSET utf8mb4' ); await conn.query('FLUSH TABLES'); - const res = await conn - .batch('INSERT INTO `more16MSingleNamedPlaceHolders` values (1, :id, 2, :id2, 3)', [ - { id: 1, id2: bigBuf }, - { id: 2, id2: 'john' } - ]); + const res = await conn.batch( + 'INSERT INTO `more16MSingleNamedPlaceHolders` values (1, :id, 2, :id2, 3)', + [ + { id: 1, id2: bigBuf }, + { id: 2, id2: 'john' } + ] + ); assert.equal(res.affectedRows, 2); const rows = await conn.query('select * from `more16MSingleNamedPlaceHolders`'); assert.deepEqual(rows, [ @@ -909,10 +911,13 @@ describe('batch', () => { 'CREATE TABLE streamNamedPlaceHolders(id int, id2 int, id3 int, t varchar(128), id4 int, id5 int) CHARSET utf8mb4' ); await conn.query('FLUSH TABLE'); - const res = await conn.batch('INSERT INTO `streamNamedPlaceHolders` values (1, :id1, 2, :id3, :id7, 3)', [ - { id1: 1, id3: stream1, id4: 99, id5: 6 }, - { id1: 2, id3: stream2, id4: 98 } - ]); + const res = await conn.batch( + 'INSERT INTO `streamNamedPlaceHolders` values (1, :id1, 2, :id3, :id7, 3)', + [ + { id1: 1, id3: stream1, id4: 99, id5: 6 }, + { id1: 2, id3: stream2, id4: 98 } + ] + ); assert.equal(res.affectedRows, 2); const rows = await conn.query('select * from `streamNamedPlaceHolders`'); 
assert.deepEqual(rows, [ @@ -948,9 +953,9 @@ describe('batch', () => { try { await conn.batch('INSERT INTO blabla values (1, :id1, 2, :id3, :id7, 3)', [ - { id1: 1, id3: stream1, id4: 99, id5: 6 }, - { id1: 2, id3: stream2, id4: 98 } - ]); + { id1: 1, id3: stream1, id4: 99, id5: 6 }, + { id1: 2, id3: stream2, id4: 98 } + ]); throw new Error('must have thrown error !'); } catch (err) { assert.isTrue(err != null); @@ -990,36 +995,36 @@ describe('batch', () => { ); await conn.query('FLUSH TABLES'); const res = await conn.batch( - 'INSERT INTO `stream16MNamedPlaceHolders` values (1, :id1, 2, :id2, :id3, 3)', - values - ); + 'INSERT INTO `stream16MNamedPlaceHolders` values (1, :id1, 2, :id2, :id3, 3)', + values + ); assert.equal(res.affectedRows, 1000000); let currRow = 0; return new Promise(function (resolve, reject) { - conn - .queryStream('select * from `stream16MNamedPlaceHolders`') - .on('error', (err) => { - reject(new Error('must not have thrown any error !')); - }) - .on('data', (row) => { - assert.deepEqual(row, { - id: 1, - id2: currRow, - id3: 2, - t: str, - id4: currRow * 2, - id5: 3 - }); - currRow++; - }) - .on('end', () => { - assert.equal(1000000, currRow); - conn.query('DROP TABLE stream16MNamedPlaceHolders'); - clearTimeout(timeout); - conn.end(); - resolve(); - }); + conn + .queryStream('select * from `stream16MNamedPlaceHolders`') + .on('error', (err) => { + reject(new Error('must not have thrown any error !')); + }) + .on('data', (row) => { + assert.deepEqual(row, { + id: 1, + id2: currRow, + id3: 2, + t: str, + id4: currRow * 2, + id5: 3 }); + currRow++; + }) + .on('end', () => { + assert.equal(1000000, currRow); + conn.query('DROP TABLE stream16MNamedPlaceHolders'); + clearTimeout(timeout); + conn.end(); + resolve(); + }); + }); }; describe('standard question mark using bulk', () => { @@ -1102,11 +1107,11 @@ describe('batch', () => { conn.end(); throw new Error('expect an error !'); } catch (err) { - assert.isTrue( - err.message.includes('Parameter at position 2 is undefined for values 1', err.message) - ); - conn.end(); - }; + assert.isTrue( + err.message.includes('Parameter at position 2 is undefined for values 1', err.message) + ); + conn.end(); + } }); it('simple batch offset date', async function () { @@ -1377,13 +1382,12 @@ describe('batch', () => { if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); const conn = await base.createConnection({ compress: useCompression, bulk: false }); try { - await conn - .batch('INSERT INTO `blabla` values (?)'); + await conn.batch('INSERT INTO `blabla` values (?)'); throw new Error('expect an error !'); } catch (err) { assert.isTrue(err.message.includes('Batch must have values set'), err.message); conn.end(); - }; + } }); it('rewrite split for maxAllowedPacket', async function () { @@ -1391,11 +1395,11 @@ describe('batch', () => { const conn = await base.createConnection({ bulk: false, maxAllowedPacket: 150 }); conn.query('DROP TABLE IF EXISTS my_table'); conn.query('CREATE TABLE my_table(id int, val LONGTEXT)'); - await conn.query('FLUSH TABLES'); + await conn.query('FLUSH TABLES'); await conn.batch('INSERT INTO my_table(id,val) VALUES( ?, ?) 
', [ - [1, t], - [2, t] - ]); + [1, t], + [2, t] + ]); const res = await conn.query('SELECT * FROM my_table'); assert.deepEqual(res, [ { id: 1, val: t }, @@ -1413,8 +1417,8 @@ describe('batch', () => { throw new Error('expect an error !'); } catch (err) { assert.isTrue( - err.message.includes('Parameter at position 1 is not set for values 1'), - err.message + err.message.includes('Parameter at position 1 is not set for values 1'), + err.message ); conn.end(); } @@ -1429,17 +1433,16 @@ describe('batch', () => { if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); const conn = await base.createConnection({ compress: useCompression, bulk: false }); try { - await conn - .batch('INSERT INTO `blabla` values (?,?)', [ - [1, 2], - [1, undefined] - ]); + await conn.batch('INSERT INTO `blabla` values (?,?)', [ + [1, 2], + [1, undefined] + ]); conn.end(); throw new Error('expect an error !'); } catch (err) { assert.isTrue( - err.message.includes('Parameter at position 2 is undefined for values 1'), - err.message + err.message.includes('Parameter at position 2 is undefined for values 1'), + err.message ); conn.end(); } @@ -1473,19 +1476,18 @@ describe('batch', () => { console.log(conn.info.getLastPackets()); }, 25000); try { - await conn - .batch('INSERT INTO simpleBatchErrorMsg values (1, ?, 2, ?, 3)', [ - [1, 'john"'], - [2, 'jac"k'] - ]); + await conn.batch('INSERT INTO simpleBatchErrorMsg values (1, ?, 2, ?, 3)', [ + [1, 'john"'], + [2, 'jac"k'] + ]); throw new Error('must have thrown error !'); } catch (err) { assert.isTrue(err != null); assert.isTrue(err.message.includes(" doesn't exist")); const expectedMsg = - debugLen === 80 - ? "INSERT INTO simpleBatchErrorMsg values (1, ?, 2, ?, 3) - parameters:[[1,'jo...]" - : 'INSERT INTO simpleBatchErrorMsg values (1, ?, 2, ?...'; + debugLen === 80 + ? "INSERT INTO simpleBatchErrorMsg values (1, ?, 2, ?, 3) - parameters:[[1,'jo...]" + : 'INSERT INTO simpleBatchErrorMsg values (1, ?, 2, ?...'; assert.isTrue(err.message.includes(expectedMsg)); assert.equal(err.errno, 1146); assert.equal(err.sqlState, '42S02'); @@ -1603,17 +1605,17 @@ describe('batch', () => { }, 25000); try { await conn.batch('INSERT INTO simpleBatchErrorMsg values (1, ?, 2, ?, 3)', [ - [1, 'john"'], - [2, 'jac"k'] - ]); + [1, 'john"'], + [2, 'jac"k'] + ]); throw new Error('must have thrown error !'); } catch (err) { assert.isTrue(err != null); assert.isTrue(err.message.includes(" doesn't exist")); const expectedMsg = - debugLen === 80 - ? "INSERT INTO simpleBatchErrorMsg values (1, ?, 2, ?, 3) - parameters:[[1,'jo...]" - : 'INSERT INTO simpleBatchErrorMsg values (1, ?, 2, ?...'; + debugLen === 80 + ? 
"INSERT INTO simpleBatchErrorMsg values (1, ?, 2, ?, 3) - parameters:[[1,'jo...]" + : 'INSERT INTO simpleBatchErrorMsg values (1, ?, 2, ?...'; assert.isTrue(err.message.includes(expectedMsg)); assert.equal(err.errno, 1146); assert.equal(err.sqlState, '42S02'); diff --git a/test/integration/test-cluster.js b/test/integration/test-cluster.js index 471eef1c..fd588deb 100644 --- a/test/integration/test-cluster.js +++ b/test/integration/test-cluster.js @@ -659,19 +659,15 @@ describe('cluster', function () { await filteredCluster.query('DROP TABLE IF EXISTS filteredSimpleBatch'); await filteredCluster.query( - 'CREATE TABLE filteredSimpleBatch(id int not null primary key auto_increment, val int)' - ); + 'CREATE TABLE filteredSimpleBatch(id int not null primary key auto_increment, val int)' + ); await filteredCluster.query('FLUSH TABLES'); const promises = []; - for (let i = 0; i < 60; i++) { - promises.push( - filteredCluster.batch('INSERT INTO filteredSimpleBatch(val) values (?)', [ - [1], - [2], - [3] - ]) - ); - } + for (let i = 0; i < 60; i++) { + promises.push( + filteredCluster.batch('INSERT INTO filteredSimpleBatch(val) values (?)', [[1], [2], [3]]) + ); + } await Promise.all(promises); const res = await filteredCluster.query('SELECT count(*) as nb FROM filteredSimpleBatch'); expect(res[0].nb).to.equal(180); diff --git a/test/integration/test-local-infile.js b/test/integration/test-local-infile.js index db28e2ee..9a1d36c9 100644 --- a/test/integration/test-local-infile.js +++ b/test/integration/test-local-infile.js @@ -295,7 +295,7 @@ describe('local-infile', () => { }) .then(() => { conn.end(); - // expected result is to throw error, but super user might still read file. + // expected result is to throw error, but super user might still read file. 
done(); }) .catch((err) => { From 2e0f0014303c5bf0ba847d6058b4853cbf630983 Mon Sep 17 00:00:00 2001 From: rusher Date: Wed, 25 Nov 2020 10:14:30 +0100 Subject: [PATCH 13/21] [CONJS-156] Ensure setting capability PLUGIN_AUTH only if server has it --- lib/cmd/change-user.js | 2 +- lib/cmd/handshake/client-capabilities.js | 7 +++++-- lib/cmd/handshake/client-handshake-response.js | 2 +- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/lib/cmd/change-user.js b/lib/cmd/change-user.js index da560214..5af0b594 100644 --- a/lib/cmd/change-user.js +++ b/lib/cmd/change-user.js @@ -60,7 +60,7 @@ class ChangeUser extends Handshake { out.writeInt8(0); } - if (this.opts.connectAttributes && info.serverCapabilities & Capabilities.CONNECT_ATTRS) { + if (info.clientCapabilities & Capabilities.CONNECT_ATTRS) { out.writeInt8(0xfc); let initPos = out.pos; //save position, assuming connection attributes length will be less than 2 bytes length out.writeInt16(0); diff --git a/lib/cmd/handshake/client-capabilities.js b/lib/cmd/handshake/client-capabilities.js index 336a13c7..0481243f 100644 --- a/lib/cmd/handshake/client-capabilities.js +++ b/lib/cmd/handshake/client-capabilities.js @@ -17,14 +17,17 @@ module.exports.init = function (opts, info) { Capabilities.MULTI_RESULTS | Capabilities.PS_MULTI_RESULTS | Capabilities.SESSION_TRACK | - Capabilities.PLUGIN_AUTH | Capabilities.PLUGIN_AUTH_LENENC_CLIENT_DATA; if ((info.serverCapabilities & Capabilities.MYSQL) === 0n) { capabilities |= Capabilities.MARIADB_CLIENT_EXTENDED_TYPE_INFO; } - if (opts.connectAttributes) { + if (info.serverCapabilities & Capabilities.PLUGIN_AUTH) { + capabilities |= Capabilities.PLUGIN_AUTH; + } + + if (opts.connectAttributes && info.serverCapabilities & Capabilities.CONNECT_ATTRS) { capabilities |= Capabilities.CONNECT_ATTRS; } diff --git a/lib/cmd/handshake/client-handshake-response.js b/lib/cmd/handshake/client-handshake-response.js index 35571f1b..652f1410 100644 --- a/lib/cmd/handshake/client-handshake-response.js +++ b/lib/cmd/handshake/client-handshake-response.js @@ -74,7 +74,7 @@ module.exports.send = function send(cmd, out, opts, pluginName, info) { out.writeInt8(0); } - if (opts.connectAttributes && info.serverCapabilities & Capabilities.CONNECT_ATTRS) { + if (info.clientCapabilities & Capabilities.CONNECT_ATTRS) { out.writeInt8(0xfc); let initPos = out.pos; //save position, assuming connection attributes length will be less than 2 bytes length out.writeInt16(0); From 41c981dcc2f68886f5df0236653a6536596d9bf6 Mon Sep 17 00:00:00 2001 From: Anel Husakovic Date: Wed, 25 Nov 2020 18:59:23 +0100 Subject: [PATCH 14/21] Update developer guide --- documentation/developers-guide.md | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/documentation/developers-guide.md b/documentation/developers-guide.md index 958b39dc..62a4dc86 100644 --- a/documentation/developers-guide.md +++ b/documentation/developers-guide.md @@ -17,12 +17,22 @@ Before submitting a pull request to the project, run local and continuous integr The repository contains a series of tests to evaluate the Connector and to make sure it can connect to and operate on MariaDB with the new code. Run local tests using npm. -In order for these tests to pass, you need to have a MariaDB or MySQL server installed, which by default it assumes is running at localhost:3306 with a database named `testn` and a user `root` without a password. 
Once this is set up, you can run the tests with npm: +In order for these tests to pass, you need to have a MariaDB or MySQL server installed, which by default it assumes is running at localhost:3306 with a database named `testn` and a user `root` without a password. +Alternatively, you will need to create a user [with the GRANT OPTION privilege](https://mariadb.com/kb/en/grant/#the-grant-option-privilege). +For example, connected as the `root` user, create the new user and grant it privileges: +``` +MariaDB [(none)]> create user anel@localhost identified by ('testpass'); +MariaDB [(none)]> grant all privileges on *.* to anel@localhost WITH GRANT OPTION; +``` +Once this is set up, you can run the tests with npm: ``` $ npm run test:base ``` - +Or run them via environment variables with a specific user: +``` +$ TEST_DB=testn TEST_USER=anel TEST_PASSWORD=testpass TEST_PORT=3306 TEST_HOST=localhost npm run test:base +``` The tests retrieve the host, password, database and port number from environmental variables, which you can manually set if you want to connect to MariaDB in other ways. * `TEST_HOST` Hostname. By default, localhost. From 83f0d513ab2b01fa947a655a24005609ead1a05a Mon Sep 17 00:00:00 2001 From: rusher Date: Fri, 27 Nov 2020 16:32:32 +0100 Subject: [PATCH 15/21] [misc] ensure connecting to database only if server has capability --- lib/cmd/handshake/client-capabilities.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/cmd/handshake/client-capabilities.js b/lib/cmd/handshake/client-capabilities.js index 0481243f..41f8ed5d 100644 --- a/lib/cmd/handshake/client-capabilities.js +++ b/lib/cmd/handshake/client-capabilities.js @@ -48,7 +48,7 @@ module.exports.init = function (opts, info) { capabilities |= Capabilities.DEPRECATE_EOF; } - if (opts.database) { + if (opts.database && info.serverCapabilities & Capabilities.CONNECT_WITH_DB) { capabilities |= Capabilities.CONNECT_WITH_DB; } From bb297542075f1c1bf2373404287f32c6ae15eafc Mon Sep 17 00:00:00 2001 From: rusher Date: Fri, 27 Nov 2020 21:43:53 +0100 Subject: [PATCH 16/21] [misc] ensure test stability with slow env --- test/integration/test-pool.js | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/test/integration/test-pool.js b/test/integration/test-pool.js index 8b073d18..158c244b 100644 --- a/test/integration/test-pool.js +++ b/test/integration/test-pool.js @@ -1065,7 +1065,7 @@ describe('Pool', () => { const pool = base.createPool({ connectionLimit: 10, minimumIdle: 8, - idleTimeout: 2, + idleTimeout: 1, acquireTimeout: 20000 }); @@ -1073,7 +1073,8 @@ for (let i = 0; i < 5000; i++) { requests.push(pool.query('SELECT ' + i)); } - setTimeout(() => { + + var test = () => { Promise.all(requests) .then(() => { setTimeout(() => { @@ -1090,10 +1091,10 @@ }, 5); setTimeout(() => { - //wait for 1 second + //wait for 2 second > idleTimeout assert.equal(pool.totalConnections(), 8); assert.equal(pool.idleConnections(), 8); - }, 1000); + }, 2000); setTimeout(() => { //minimumIdle-1 is possible after reaching idleTimeout and connection @@ -1108,7 +1109,21 @@ pool.end(); done(err); }); - }, 4000); + }; + + const waitServerConnections = (max) => { + if (max > 0) { + setTimeout(() => { + console.log(pool.totalConnections()); + if (pool.totalConnections() < 8) { + waitServerConnections(max - 1); + } else test(); + }, 1000); + } else { + done(new Error("pool doesn't have at least 8 connections after 10s")); + } + }; + waitServerConnections(10); });
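Patches 13 and 15 above apply the same negotiation rule: a client capability flag is set only when the server's handshake advertised the matching bit. A short sketch of that gating under stated assumptions — the function shape and the flag table are illustrative stand-ins for the connector's real Capabilities constants:

```js
// Sketch of capability negotiation: advertise a client flag only when the
// server offered it. Flag values follow the MySQL/MariaDB protocol layout
// but are repeated here purely for illustration.
const Capabilities = {
  CONNECT_WITH_DB: 1n << 3n,
  PLUGIN_AUTH: 1n << 19n,
  CONNECT_ATTRS: 1n << 20n
};

function initClientCapabilities(opts, serverCapabilities) {
  let capabilities = 0n;
  // never set PLUGIN_AUTH unilaterally: a server that did not announce it
  // would misread the handshake response layout
  if (serverCapabilities & Capabilities.PLUGIN_AUTH) {
    capabilities |= Capabilities.PLUGIN_AUTH;
  }
  // option-driven flags are gated on both the client option and server support
  if (opts.database && serverCapabilities & Capabilities.CONNECT_WITH_DB) {
    capabilities |= Capabilities.CONNECT_WITH_DB;
  }
  if (opts.connectAttributes && serverCapabilities & Capabilities.CONNECT_ATTRS) {
    capabilities |= Capabilities.CONNECT_ATTRS;
  }
  return capabilities;
}

// a server offering only CONNECT_WITH_DB: PLUGIN_AUTH stays unset
console.log(initClientCapabilities({ database: 'testn' }, 1n << 3n)); // 8n
```

The design point both patches share is that the intersection of client and server capabilities is computed once, before the handshake response is written, so later packet-encoding code can test the negotiated flags instead of raw options.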
it('test minimum idle', function (done) { From 7d015b539ef4a5f7c0f2e834a7743bd3422174a4 Mon Sep 17 00:00:00 2001 From: rusher Date: Fri, 4 Dec 2020 10:53:45 +0100 Subject: [PATCH 17/21] [misc] correcting travis test user --- .travis/script.sh | 3 +-- .travis/sql/dbinit.sql | 10 ++-------- 2 files changed, 3 insertions(+), 10 deletions(-) diff --git a/.travis/script.sh b/.travis/script.sh index f045ffcc..70453bc4 100644 --- a/.travis/script.sh +++ b/.travis/script.sh @@ -32,8 +32,7 @@ if [ -n "$SKYSQL" ] || [ -n "$SKYSQL_HA" ]; then fi else - export TEST_USER=boby - export TEST_PASSWORD=heyPassw0@rd + export TEST_USER=bob export TEST_HOST=mariadb.example.com export COMPOSE_FILE=.travis/docker-compose.yml export ENTRYPOINT=$PROJ_PATH/.travis/sql diff --git a/.travis/sql/dbinit.sql b/.travis/sql/dbinit.sql index 9b671f60..1c82b22d 100644 --- a/.travis/sql/dbinit.sql +++ b/.travis/sql/dbinit.sql @@ -1,14 +1,8 @@ -CREATE USER 'bob'@'localhost'; -GRANT ALL ON *.* TO 'bob'@'localhost' with grant option; - CREATE USER 'bob'@'%'; GRANT ALL ON *.* TO 'bob'@'%' with grant option; -CREATE USER 'boby'@'%' identified by 'heyPassw0@rd'; -GRANT ALL ON *.* TO 'boby'@'%' with grant option; - -CREATE USER 'boby'@'localhost' identified by 'heyPassw0@rd'; -GRANT ALL ON *.* TO 'boby'@'localhost' with grant option; +/*M!100501 CREATE USER 'boby'@'%' identified by 'hey'*/; +/*M!100501 GRANT ALL ON *.* TO 'boby'@'%' with grant option*/; FLUSH PRIVILEGES; From 67f545c14584300d5fc3a4dd22c43d5073f36fa8 Mon Sep 17 00:00:00 2001 From: rusher Date: Fri, 4 Dec 2020 11:10:41 +0100 Subject: [PATCH 18/21] bump 2.5.2 --- CHANGELOG.md | 12 ++++++++++++ package.json | 2 +- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c95f6abc..8dbdab62 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,17 @@ # Change Log + +## [2.5.2](https://github.com/mariadb-corporation/mariadb-connector-nodejs/tree/2.5.2) (14 Dec 2020) +[Full Changelog](https://github.com/mariadb-corporation/mariadb-connector-nodejs/compare/2.5.1...2.5.2) + +* [CONJS-151] bulk batch error (parameter truncation) #137 +* [CONJS-152] correction when enabling the `permitLocalInfile` option and some initial commands +* [CONJS-154] Timezone support correction and clarification +* [CONJS-155] correction to support for node.js 10.13 to 10.19 +* [CONJS-156] Ensure setting capability PLUGIN_AUTH only if server has it + +documentation improvement + ## [2.5.1](https://github.com/mariadb-corporation/mariadb-connector-nodejs/tree/2.5.1) (23 Oct 2020) [Full Changelog](https://github.com/mariadb-corporation/mariadb-connector-nodejs/compare/2.5.0...2.5.1) diff --git a/package.json b/package.json index 0ed420c7..4464768e 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "mariadb", - "version": "2.6.0", + "version": "2.5.2", "description": "fast mariadb/mysql connector.", "main": "promise.js", "types": "types/index.d.ts", From 6bdde9f1c8d1de89d8199ba140fe4d07f69d52fe Mon Sep 17 00:00:00 2001 From: rusher Date: Fri, 4 Dec 2020 14:27:39 +0100 Subject: [PATCH 19/21] [misc] batch correction for SKYSQL --- .travis/script.sh | 2 + .travis/sql/dbinit.sql | 2 +- test/integration/test-batch.js | 152 +++++++++++---------------- test/integration/test-big-query.js | 1 + test/integration/test-compression.js | 2 + test/integration/test-streaming.js | 122 ++++++++------------- 6 files changed, 112 insertions(+), 169 deletions(-) diff --git a/.travis/script.sh b/.travis/script.sh index 70453bc4..ca8e5276 100644 --- 
a/.travis/script.sh +++ b/.travis/script.sh @@ -51,6 +51,8 @@ else docker-compose -f ${COMPOSE_FILE} build export TEST_PORT=4006 export TEST_SSL_PORT=4009 + export TEST_USER=boby + export TEST_PASSWORD=heyPassw0@rd fi docker-compose -f ${COMPOSE_FILE} up -d diff --git a/.travis/sql/dbinit.sql b/.travis/sql/dbinit.sql index 1c82b22d..ebfaaedb 100644 --- a/.travis/sql/dbinit.sql +++ b/.travis/sql/dbinit.sql @@ -1,7 +1,7 @@ CREATE USER 'bob'@'%'; GRANT ALL ON *.* TO 'bob'@'%' with grant option; -/*M!100501 CREATE USER 'boby'@'%' identified by 'hey'*/; +/*M!100501 CREATE USER 'boby'@'%' identified by 'heyPassw0@rd'*/; /*M!100501 GRANT ALL ON *.* TO 'boby'@'%' with grant option*/; FLUSH PRIVILEGES; diff --git a/test/integration/test-batch.js b/test/integration/test-batch.js index 60d2d7ef..9620994e 100644 --- a/test/integration/test-batch.js +++ b/test/integration/test-batch.js @@ -13,7 +13,7 @@ const str = base.utf8Collation() describe('batch', () => { const fileName = path.join(os.tmpdir(), Math.random() + 'tempBatchFile.txt'); - const testSize = 16 * 1024 * 1024 + 800; // more than one packet + const testSize = 16 * 1024 * 1024 + 80; // more than one packet let maxAllowedSize, bigBuf, timezoneParam; let supportBulk; @@ -60,6 +60,7 @@ describe('batch', () => { 'CREATE TABLE simpleBatch(id int, id2 boolean, id3 int, t varchar(128), d datetime, d2 datetime(6), g POINT, id4 int) CHARSET utf8mb4' ); await shareConn.query('FLUSH TABLES'); + await conn.query('START TRANSACTION'); const f = {}; f.toSqlString = () => { @@ -163,6 +164,8 @@ describe('batch', () => { id4: 3 } ]); + await conn.query('ROLLBACK'); + conn.query('DROP TABLE simpleBatch'); clearTimeout(timeout); @@ -180,6 +183,7 @@ describe('batch', () => { conn.query('DROP TABLE IF EXISTS simpleBatchWithOptions'); conn.query('CREATE TABLE simpleBatchWithOptions(id int, d datetime)'); await shareConn.query('FLUSH TABLES'); + await conn.query('START TRANSACTION'); const f = {}; f.toSqlString = () => { @@ -207,6 +211,8 @@ describe('batch', () => { d: new Date('2001-12-31 23:59:58') } ]); + await conn.query('ROLLBACK'); + conn.query('DROP TABLE simpleBatchWithOptions'); clearTimeout(timeout); conn.end(); @@ -225,6 +231,8 @@ describe('batch', () => { conn.query('DROP TABLE IF EXISTS simpleBatchCP1251'); conn.query('CREATE TABLE simpleBatchCP1251(t varchar(128), id int) CHARSET utf8mb4'); await shareConn.query('FLUSH TABLES'); + await conn.query('START TRANSACTION'); + let res = await conn.batch('INSERT INTO `simpleBatchCP1251` values (?, ?)', [ ['john', 2], ['©°', 3] @@ -235,6 +243,8 @@ describe('batch', () => { { id: 2, t: 'john' }, { id: 3, t: '©°' } ]); + await conn.query('ROLLBACK'); + conn.query('DROP TABLE simpleBatchCP1251'); clearTimeout(timeout); conn.end(); @@ -275,6 +285,7 @@ describe('batch', () => { console.log(conn.info.getLastPackets()); }, 2000); await shareConn.query('FLUSH TABLES'); + await conn.query('START TRANSACTION'); await conn.batch('INSERT INTO noValueBatch values ()', []); const res = await conn.query('SELECT COUNT(*) as nb FROM noValueBatch'); @@ -298,6 +309,7 @@ describe('batch', () => { 'CREATE TABLE simpleBatch(id int, id2 boolean, id3 int, t varchar(8), d datetime, d2 datetime(6), g POINT, id4 int) CHARSET utf8mb4' ); await conn.query('FLUSH TABLES'); + await conn.query('START TRANSACTION'); try { let res = await conn.batch('INSERT INTO `simpleBatch` values (1, ?, 2, ?, ?, ?, ?, 3)', [ [ @@ -402,6 +414,7 @@ describe('batch', () => { 'CREATE TABLE bigBatchWith16mMaxAllowedPacket(id int, id2 int, id3 int, t 
varchar(128), id4 int) CHARSET utf8mb4' ); await conn.query('FLUSH TABLES'); + await conn.query('START TRANSACTION'); const values = []; for (let i = 0; i < 1000000; i++) { @@ -453,6 +466,8 @@ describe('batch', () => { 'CREATE TABLE bigBatchWith4mMaxAllowedPacket(id int, id2 int, id3 int, t varchar(128), id4 int) CHARSET utf8mb4' ); await conn.query('FLUSH TABLES'); + await conn.query('START TRANSACTION'); + const values = []; for (let i = 0; i < 1000000; i++) { values.push([i, str]); @@ -502,6 +517,8 @@ describe('batch', () => { for (let i = 0; i < 1000000; i++) { values.push([i, str]); } + await conn.query('START TRANSACTION'); + try { await conn.batch('INSERT INTO `bigBatchError` values (1, ?, 2, ?, 3)', values); throw new Error('must have thrown error !'); @@ -523,6 +540,8 @@ describe('batch', () => { 'CREATE TABLE singleBigInsertWithoutMaxAllowedPacket(id int, id2 int, id3 int, t longtext, id4 int) CHARSET utf8mb4' ); await conn.query('FLUSH TABLE'); + await conn.query('START TRANSACTION'); + const res = await conn.batch( 'INSERT INTO `singleBigInsertWithoutMaxAllowedPacket` values (1, ?, 2, ?, 3)', [ @@ -569,6 +588,8 @@ describe('batch', () => { 'CREATE TABLE batchWithStream(id int, id2 int, id3 int, t varchar(128), id4 int, id5 int) CHARSET utf8mb4' ); await conn.query('FLUSH TABLES'); + await conn.query('START TRANSACTION'); + let res = await conn.batch('INSERT INTO `batchWithStream` values (1, ?, 2, ?, ?, 3)', [ [1, stream1, 99], [2, stream2, 98] @@ -646,6 +667,8 @@ describe('batch', () => { 'CREATE TABLE bigBatchWithStreams(id int, id2 int, id3 int, t varchar(128), id4 int, id5 int) CHARSET utf8mb4' ); await conn.query('FLUSH TABLES'); + await conn.query('START TRANSACTION'); + let res = await conn.batch( 'INSERT INTO `bigBatchWithStreams` values (1, ?, 2, ?, ?, 3)', values @@ -715,6 +738,8 @@ describe('batch', () => { 'CREATE TABLE simpleNamedPlaceHolders(id int, id2 int, id3 int, t varchar(128), id4 int) CHARSET utf8mb4' ); await conn.query('FLUSH TABLES'); + await conn.query('START TRANSACTION'); + let res = await conn.batch( 'INSERT INTO `simpleNamedPlaceHolders` values (1, :param_1, 2, :param_2, 3)', [ @@ -823,6 +848,8 @@ describe('batch', () => { 'CREATE TABLE more16MNamedPlaceHolders(id int, id2 int, id3 int, t varchar(128), id4 int) CHARSET utf8mb4' ); await conn.query('FLUSH TABLES'); + await conn.query('START TRANSACTION'); + const values = []; for (let i = 0; i < 1000000; i++) { values.push({ id1: i, id2: str }); @@ -869,6 +896,8 @@ describe('batch', () => { 'CREATE TABLE more16MSingleNamedPlaceHolders(id int, id2 int, id3 int, t longtext, id4 int) CHARSET utf8mb4' ); await conn.query('FLUSH TABLES'); + await conn.query('START TRANSACTION'); + const res = await conn.batch( 'INSERT INTO `more16MSingleNamedPlaceHolders` values (1, :id, 2, :id2, 3)', [ @@ -911,6 +940,8 @@ describe('batch', () => { 'CREATE TABLE streamNamedPlaceHolders(id int, id2 int, id3 int, t varchar(128), id4 int, id5 int) CHARSET utf8mb4' ); await conn.query('FLUSH TABLE'); + await conn.query('START TRANSACTION'); + const res = await conn.batch( 'INSERT INTO `streamNamedPlaceHolders` values (1, :id1, 2, :id3, :id7, 3)', [ @@ -994,6 +1025,8 @@ describe('batch', () => { 'CREATE TABLE stream16MNamedPlaceHolders(id int, id2 int, id3 int, t varchar(128), id4 int, id5 int) CHARSET utf8mb4' ); await conn.query('FLUSH TABLES'); + await conn.query('START TRANSACTION'); + const res = await conn.batch( 'INSERT INTO `stream16MNamedPlaceHolders` values (1, :id1, 2, :id2, :id3, 3)', values @@ -1040,7 +1073,7 @@ 
describe('batch', () => { const useCompression = false; it('simple batch, local date', async function () { - if (process.env.SKYSQL || process.env.SKYSQL_HA || !base.utf8Collation()) { + if (!base.utf8Collation()) { this.skip(); return; } @@ -1050,31 +1083,19 @@ describe('batch', () => { }); it('simple batch with option', async function () { - if (process.env.SKYSQL || process.env.SKYSQL_HA) { - this.skip(); - return; - } this.timeout(30000); if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); await simpleBatchWithOptions(useCompression, true); }); it('batch without value', async function () { - if (process.env.SKYSQL || process.env.SKYSQL_HA) { - this.skip(); - return; - } this.timeout(30000); if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); await noValueBatch(useCompression, true); }); it('batch without parameter', async function () { - if ( - process.env.SKYSQL || - process.env.SKYSQL_HA || - (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) - ) { + if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) { this.skip(); return; } @@ -1090,11 +1111,7 @@ describe('batch', () => { }); it('batch with erroneous parameter', async function () { - if ( - process.env.SKYSQL || - process.env.SKYSQL_HA || - (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) - ) { + if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) { this.skip(); return; } @@ -1115,7 +1132,7 @@ describe('batch', () => { }); it('simple batch offset date', async function () { - if (process.env.SKYSQL || process.env.SKYSQL_HA || !base.utf8Collation()) { + if (!base.utf8Collation()) { this.skip(); return; } @@ -1125,7 +1142,7 @@ describe('batch', () => { }); it('simple batch offset date Z ', async function () { - if (process.env.SKYSQL || process.env.SKYSQL_HA || !base.utf8Collation()) { + if (!base.utf8Collation()) { this.skip(); return; } @@ -1135,16 +1152,13 @@ describe('batch', () => { }); it('simple batch encoding CP1251', async function () { - if (process.env.SKYSQL || process.env.SKYSQL_HA) { - this.skip(); - return; - } this.timeout(30000); await simpleBatchEncodingCP1251(useCompression, true, 'local'); }); it('simple batch error message ', async function () { - if (process.env.SKYSQL || process.env.SKYSQL_HA) { + if (process.env.SKYSQL_HA) { + // due to https://jira.mariadb.org/browse/MXS-3196 this.skip(); return; } @@ -1153,17 +1167,13 @@ describe('batch', () => { }); it('simple batch error message packet split', async function () { - if (process.env.SKYSQL || process.env.SKYSQL_HA) { - this.skip(); - return; - } this.timeout(30000); if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); await simpleBatchErrorSplit(useCompression, true, 'local'); }); it('non rewritable batch', async function () { - if (process.env.SKYSQL || process.env.SKYSQL_HA || !supportBulk) { + if (!supportBulk) { this.skip(); return; } @@ -1172,12 +1182,7 @@ describe('batch', () => { }); it('16M+ batch with 16M max_allowed_packet', async function () { - if ( - process.env.SKYSQL || - process.env.SKYSQL_HA || - !process.env.RUN_LONG_TEST || - maxAllowedSize <= testSize - ) { + if (!process.env.RUN_LONG_TEST || maxAllowedSize <= testSize) { this.skip(); return; } @@ -1186,12 +1191,7 @@ describe('batch', () => { }); it('16M+ batch with max_allowed_packet set to 4M', async function () { - if ( - process.env.SKYSQL || - process.env.SKYSQL_HA || - 
!process.env.RUN_LONG_TEST || - maxAllowedSize <= 4 * 1024 * 1024 - ) { + if (!process.env.RUN_LONG_TEST || maxAllowedSize <= 4 * 1024 * 1024) { this.skip(); return; } @@ -1209,12 +1209,7 @@ describe('batch', () => { }); it('16M+ single insert batch with no maxAllowedPacket set', async function () { - if ( - process.env.SKYSQL || - process.env.SKYSQL_HA || - !process.env.RUN_LONG_TEST || - maxAllowedSize <= testSize - ) { + if (!process.env.RUN_LONG_TEST || maxAllowedSize <= testSize) { this.skip(); } else { this.timeout(360000); @@ -1223,7 +1218,7 @@ describe('batch', () => { }); it('batch with streams', async function () { - if (process.env.SKYSQL || process.env.SKYSQL_HA || !base.utf8Collation()) { + if (!base.utf8Collation()) { this.skip(); } else { this.timeout(30000); @@ -1232,21 +1227,12 @@ describe('batch', () => { }); it('batch error with streams', async function () { - if (process.env.SKYSQL || process.env.SKYSQL_HA) { - this.skip(); - } else { - this.timeout(30000); - await batchErrorWithStream(useCompression, true); - } + this.timeout(30000); + await batchErrorWithStream(useCompression, true); }); it('16M+ batch with streams', async function () { - if ( - process.env.SKYSQL || - process.env.SKYSQL_HA || - !process.env.RUN_LONG_TEST || - maxAllowedSize <= testSize - ) { + if (!process.env.RUN_LONG_TEST || maxAllowedSize <= testSize) { this.skip(); } else { this.timeout(360000); @@ -1255,12 +1241,7 @@ describe('batch', () => { }); it('16M+ error batch with streams', async function () { - if ( - process.env.SKYSQL || - process.env.SKYSQL_HA || - !process.env.RUN_LONG_TEST || - maxAllowedSize <= testSize - ) { + if (!process.env.RUN_LONG_TEST || maxAllowedSize <= testSize) { this.skip(); return; } @@ -1273,7 +1254,6 @@ describe('batch', () => { const useCompression = true; it('simple batch, local date', async function () { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!base.utf8Collation()) this.skip(); this.timeout(30000); if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); @@ -1281,7 +1261,6 @@ describe('batch', () => { }); it('simple batch offset date', async function () { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!base.utf8Collation()) this.skip(); this.timeout(30000); if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); @@ -1289,26 +1268,28 @@ describe('batch', () => { }); it('simple batch error message ', async function () { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); + if (process.env.SKYSQL_HA) { + // due to https://jira.mariadb.org/browse/MXS-3196 + this.skip(); + return; + } this.timeout(30000); await simpleBatchErrorMsg(useCompression, true); }); it('batch without value', async function () { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); this.timeout(30000); if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); await noValueBatch(useCompression, true); }); it('non rewritable batch', async function () { - if (process.env.SKYSQL || process.env.SKYSQL_HA || !supportBulk) this.skip(); + if (!supportBulk) this.skip(); this.timeout(30000); await nonRewritableBatch(useCompression, true); }); it('16M+ batch with 16M max_allowed_packet', async function () { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!process.env.RUN_LONG_TEST) this.skip(); if (maxAllowedSize <= testSize) this.skip(); this.timeout(360000); @@ -1316,7 +1297,6 @@ describe('batch', () => { }); it('16M+ 
batch with max_allowed_packet set to 4M', async function () { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!process.env.RUN_LONG_TEST) this.skip(); if (maxAllowedSize <= 4 * 1024 * 1024) this.skip(); this.timeout(360000); @@ -1324,14 +1304,12 @@ describe('batch', () => { }); it('16M+ error batch', async function () { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (maxAllowedSize <= testSize) this.skip(); this.timeout(360000); await bigBatchError(useCompression, true); }); it('16M+ single insert batch with no maxAllowedPacket set', async function () { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!process.env.RUN_LONG_TEST) this.skip(); if (maxAllowedSize <= testSize) this.skip(); this.timeout(360000); @@ -1339,20 +1317,17 @@ describe('batch', () => { }); it('batch with streams', async function () { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!base.utf8Collation()) this.skip(); this.timeout(30000); await batchWithStream(useCompression, true); }); it('batch error with streams', async function () { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); this.timeout(30000); await batchErrorWithStream(useCompression, true); }); it('16M+ batch with streams', async function () { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!process.env.RUN_LONG_TEST) this.skip(); if (maxAllowedSize <= testSize) this.skip(); this.timeout(360000); @@ -1360,7 +1335,6 @@ describe('batch', () => { }); it('16M+ error batch with streams', async function () { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!process.env.RUN_LONG_TEST) this.skip(); if (maxAllowedSize <= testSize) this.skip(); this.timeout(360000); @@ -1701,25 +1675,27 @@ describe('batch', () => { describe('named parameter with bulk', () => { it('simple batch', async function () { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); this.timeout(30000); await simpleNamedPlaceHolders(true); }); it('simple batch error', async function () { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); + if (process.env.SKYSQL_HA) { + // due to https://jira.mariadb.org/browse/MXS-3196 + this.skip(); + return; + } this.timeout(30000); await simpleNamedPlaceHoldersErr(true); }); it('non rewritable batch', async function () { - if (process.env.SKYSQL || process.env.SKYSQL_HA || !supportBulk) this.skip(); + if (!supportBulk) this.skip(); this.timeout(30000); await nonRewritableHoldersErr(true); }); it('16M+ batch', async function () { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!process.env.RUN_LONG_TEST) this.skip(); if (maxAllowedSize <= testSize) this.skip(); this.timeout(360000); @@ -1727,7 +1703,6 @@ describe('batch', () => { }); it('16M+ single insert batch', async function () { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!process.env.RUN_LONG_TEST) this.skip(); if (maxAllowedSize <= testSize) this.skip(); this.timeout(360000); @@ -1735,20 +1710,17 @@ describe('batch', () => { }); it('batch with streams', async function () { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!base.utf8Collation()) this.skip(); this.timeout(30000); await streamNamedPlaceHolders(true); }); it('batch error with streams', async function () { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); this.timeout(30000); await streamErrorNamedPlaceHolders(true); }); it('16M+ batch with streams', async function () { - if (process.env.SKYSQL || 
process.env.SKYSQL_HA) this.skip(); if (!process.env.RUN_LONG_TEST) this.skip(); if (maxAllowedSize <= testSize) this.skip(); this.timeout(360000); diff --git a/test/integration/test-big-query.js b/test/integration/test-big-query.js index 614b1017..a65d6aba 100644 --- a/test/integration/test-big-query.js +++ b/test/integration/test-big-query.js @@ -77,6 +77,7 @@ describe('Big query', function () { conn.query('DROP TABLE IF EXISTS bigParameter'); conn.query(sql); await shareConn.query('FLUSH TABLES'); + await conn.beginTransaction(); conn.query(sqlInsert, params); const rows = await conn.query('SELECT * from bigParameter'); for (let i = 0; i < 10; i++) { diff --git a/test/integration/test-compression.js b/test/integration/test-compression.js index dca49125..ecf4163a 100644 --- a/test/integration/test-compression.js +++ b/test/integration/test-compression.js @@ -74,6 +74,7 @@ describe('Compression', function () { conn.query('DROP TABLE IF EXISTS bigParameter'); conn.query('CREATE TABLE bigParameter (b longblob)'); await conn.query('FLUSH TABLES'); + await conn.beginTransaction(); conn.query('insert into bigParameter(b) values(?)', [buf]); const rows = await conn.query('SELECT * from bigParameter'); assert.deepEqual(rows[0].b, buf); @@ -85,6 +86,7 @@ describe('Compression', function () { conn.query('DROP TABLE IF EXISTS bigParameter2'); conn.query('CREATE TABLE bigParameter2 (b longblob)'); await conn.query('FLUSH TABLES'); + await conn.beginTransaction(); conn.query('insert into bigParameter2(b) values(?)', [randomBuf]); const rows = await conn.query('SELECT * from bigParameter2'); assert.deepEqual(rows[0].b, randomBuf); diff --git a/test/integration/test-streaming.js b/test/integration/test-streaming.js index 9f718224..66173b1d 100644 --- a/test/integration/test-streaming.js +++ b/test/integration/test-streaming.js @@ -66,81 +66,57 @@ describe('streaming', () => { }); }); - it('Streaming single parameter', function (done) { + it('Streaming single parameter', async function () { if (maxAllowedSize < size) this.skip(); this.timeout(20000); const r = fs.createReadStream(fileName); - shareConn - .query('truncate Streaming') - .then(() => { - return shareConn.query('insert into Streaming(b) values(?)', [r]); - }) - .then(() => { - return shareConn.query('SELECT b from Streaming'); - }) - .then((rows) => { - assert.equal(size, rows[0].b.length); - assert.deepEqual(rows, [{ b: buf }]); - done(); - }) - .catch(done); + await shareConn.query('truncate Streaming'); + await shareConn.beginTransaction(); + await shareConn.query('insert into Streaming(b) values(?)', [r]); + const rows = await shareConn.query('SELECT b from Streaming'); + assert.equal(size, rows[0].b.length); + assert.deepEqual(rows, [{ b: buf }]); }); - it('Streaming multiple parameter', function (done) { + it('Streaming multiple parameter', async function () { this.timeout(20000); if (maxAllowedSize < size) this.skip(); const r = fs.createReadStream(halfFileName); const r2 = fs.createReadStream(halfFileName); - shareConn - .query('truncate Streaming') - .then(() => { - return shareConn.query('insert into Streaming(b, c, d, e) values(?, ?, ?, ?)', [ - r, - 't1', - r2, - 't2' - ]); - }) - .then(() => { - return shareConn.query('SELECT * from Streaming'); - }) - .then((rows) => { - assert.equal(size / 2, rows[0].b.length); - assert.equal(size / 2, rows[0].d.length); - assert.deepEqual(rows, [{ id: 1, b: buf2, c: 't1', d: buf2, e: 't2' }]); - done(); - }) - .catch(done); + await shareConn.query('truncate Streaming'); + await 
shareConn.beginTransaction();
+    await shareConn.query('insert into Streaming(b, c, d, e) values(?, ?, ?, ?)', [
+      r,
+      't1',
+      r2,
+      't2'
+    ]);
+    const rows = await shareConn.query('SELECT * from Streaming');
+    assert.equal(size / 2, rows[0].b.length);
+    assert.equal(size / 2, rows[0].d.length);
+    assert.deepEqual(rows, [{ id: 1, b: buf2, c: 't1', d: buf2, e: 't2' }]);
   });

-  it('Streaming multiple parameter begin no stream', function (done) {
+  it('Streaming multiple parameter begin no stream', async function () {
     if (maxAllowedSize < size) this.skip();
     this.timeout(20000);
     const r = fs.createReadStream(halfFileName);
     const r2 = fs.createReadStream(halfFileName);
-    shareConn
-      .query('truncate Streaming')
-      .then(() => {
-        return shareConn.query('insert into Streaming(c, b, e, d) values(?, ?, ?, ?)', [
-          't1',
-          r,
-          't2',
-          r2
-        ]);
-      })
-      .then(() => {
-        return shareConn.query('SELECT * from Streaming');
-      })
-      .then((rows) => {
-        assert.equal(size / 2, rows[0].b.length);
-        assert.equal(size / 2, rows[0].d.length);
-        assert.deepEqual(rows, [{ id: 1, b: buf2, c: 't1', d: buf2, e: 't2' }]);
-        done();
-      })
-      .catch(done);
+    await shareConn.query('truncate Streaming');
+    await shareConn.beginTransaction();
+    await shareConn.query('insert into Streaming(c, b, e, d) values(?, ?, ?, ?)', [
+      't1',
+      r,
+      't2',
+      r2
+    ]);
+    const rows = await shareConn.query('SELECT * from Streaming');
+    assert.equal(size / 2, rows[0].b.length);
+    assert.equal(size / 2, rows[0].d.length);
+    assert.deepEqual(rows, [{ id: 1, b: buf2, c: 't1', d: buf2, e: 't2' }]);
   });

-  it('Streaming multiple parameter ensure max callstack', function (done) {
+  it('Streaming multiple parameter ensure max callstack', async function () {
     if (maxAllowedSize < size) this.skip();
     this.timeout(20000);
     const r = fs.createReadStream(halfFileName);
@@ -157,26 +133,16 @@ describe('streaming', () => {
     createTable += ')';
     insertSql += ')';

-    shareConn
-      .query('DROP TABLE IF EXISTS Streaming2')
-      .then(() => {
-        return shareConn.query(createTable);
-      })
-      .then(() => {
-        return shareConn.query(insertSql, params);
-      })
-      .then(() => {
-        return shareConn.query('SELECT * from Streaming2');
-      })
-      .then((rows) => {
-        assert.equal(size / 2, rows[0].b.length);
-        assert.deepEqual(rows[0].b, buf2);
-        for (let i = 0; i < max; i++) {
-          assert.equal(rows[0]['t' + i], i);
-        }
-        done();
-      })
-      .catch(done);
+    await shareConn.query('DROP TABLE IF EXISTS Streaming2');
+    await shareConn.query(createTable);
+    await shareConn.beginTransaction();
+    await shareConn.query(insertSql, params);
+    const rows = await shareConn.query('SELECT * from Streaming2');
+    assert.equal(size / 2, rows[0].b.length);
+    assert.deepEqual(rows[0].b, buf2);
+    for (let i = 0; i < max; i++) {
+      assert.equal(rows[0]['t' + i], i);
+    }
   });

   function createTmpFiles(done) {

From 867bc7e3ae744a62f80e5077f45dc45d90eb97cc Mon Sep 17 00:00:00 2001
From: rusher
Date: Fri, 4 Dec 2020 17:02:39 +0100
Subject: [PATCH 20/21] [misc] improve tests for replication servers

Change tests to use a transaction whenever a SELECT immediately follows an
INSERT. This ensures the SELECT runs on the same server as the INSERT,
avoiding replication lag issues.
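To make the rationale concrete, here is a minimal sketch of the read-your-own-write pattern these tests now follow, using the connector's promise API (`createConnection`, `beginTransaction`, `query`, `commit`). The host, user, database and table `t` are illustrative placeholders (borrowed from the test environment above); the read-write-splitting load balancer in front of the replicas, such as MaxScale, is an assumption of the scenario, not something the connector requires:

```
const mariadb = require('mariadb');

async function readYourOwnWrite() {
  // Placeholder connection settings: adapt host/user/database to your setup.
  const conn = await mariadb.createConnection({
    host: 'mariadb.example.com',
    user: 'bob',
    database: 'testn'
  });
  try {
    // Outside a transaction, a load balancer may route the SELECT to a
    // replica that has not yet applied the INSERT.
    await conn.beginTransaction();
    await conn.query('INSERT INTO t(val) VALUES (?)', ['x']);
    // Inside the transaction, both statements stay on the same server,
    // so the inserted row is guaranteed to be visible to the SELECT.
    const rows = await conn.query('SELECT val FROM t');
    await conn.commit();
    return rows;
  } finally {
    await conn.end();
  }
}
```

A short explicit transaction is a more targeted fix than disabling read-write splitting globally, which is presumably why the tests adopt it only where a SELECT verifies a preceding INSERT.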
--- test/integration/test-batch-callback.js | 756 ++++---- test/integration/test-batch-geometry-type.js | 1700 +++++++++--------- test/integration/test-big-query.js | 6 +- test/integration/test-connection-opts.js | 320 +--- test/integration/test-local-infile.js | 266 ++- test/integration/test-metadata.js | 117 +- test/integration/test-multi-results.js | 223 +-- test/integration/test-placholders.js | 76 +- test/integration/test-query-values-in-sql.js | 194 +- test/integration/test-query.js | 299 +-- test/integration/test-typecast.js | 217 +-- 11 files changed, 1810 insertions(+), 2364 deletions(-) diff --git a/test/integration/test-batch-callback.js b/test/integration/test-batch-callback.js index 8ace2e4f..f9d2ff18 100644 --- a/test/integration/test-batch-callback.js +++ b/test/integration/test-batch-callback.js @@ -89,123 +89,125 @@ describe('batch callback', () => { 'CHARSET utf8mb4' ); conn.query('FLUSH TABLES'); - const f = {}; - f.toSqlString = () => { - return 'blabla'; - }; - conn.batch( - 'INSERT INTO `simpleBatch` values (1, ?, 2, ?, ?, ?, ?, 3)', - [ - [ - true, - 'john😎🌶\\\\', - new Date('2001-12-31 23:59:58'), - new Date('2018-01-01 12:30:20.456789'), - { - type: 'Point', - coordinates: [10, 10] - } - ], - [ - true, - f, - new Date('2001-12-31 23:59:58'), - new Date('2018-01-01 12:30:20.456789'), - { - type: 'Point', - coordinates: [10, 10] - } - ], - [ - false, - { name: 'jackमस्', val: 'tt' }, - null, - new Date('2018-01-21 11:30:20.123456'), - { - type: 'Point', - coordinates: [10, 20] - } - ], + conn.beginTransaction(() => { + const f = {}; + f.toSqlString = () => { + return 'blabla'; + }; + conn.batch( + 'INSERT INTO `simpleBatch` values (1, ?, 2, ?, ?, ?, ?, 3)', [ - 0, - null, - new Date('2020-12-31 23:59:59'), - new Date('2018-01-21 11:30:20.123456'), - { - type: 'Point', - coordinates: [20, 20] - } - ] - ], - (err, res) => { - if (err) return done(err); - - assert.equal(res.affectedRows, 4); - conn.query('select * from `simpleBatch`', (err, res) => { - if (err) return done(err); - assert.deepEqual(res, [ + [ + true, + 'john😎🌶\\\\', + new Date('2001-12-31 23:59:58'), + new Date('2018-01-01 12:30:20.456789'), { - id: 1, - id2: 1, - id3: 2, - t: 'john😎🌶\\\\', - d: new Date('2001-12-31 23:59:58'), - d2: new Date('2018-01-01 12:30:20.456789'), - g: { - type: 'Point', - coordinates: [10, 10] - }, - id4: 3 - }, + type: 'Point', + coordinates: [10, 10] + } + ], + [ + true, + f, + new Date('2001-12-31 23:59:58'), + new Date('2018-01-01 12:30:20.456789'), { - id: 1, - id2: 1, - id3: 2, - t: 'blabla', - d: new Date('2001-12-31 23:59:58'), - d2: new Date('2018-01-01 12:30:20.456789'), - g: { - type: 'Point', - coordinates: [10, 10] - }, - id4: 3 - }, + type: 'Point', + coordinates: [10, 10] + } + ], + [ + false, + { name: 'jackमस्', val: 'tt' }, + null, + new Date('2018-01-21 11:30:20.123456'), { - id: 1, - id2: 0, - id3: 2, - t: '{"name":"jackमस्","val":"tt"}', - d: null, - d2: new Date('2018-01-21 11:30:20.123456'), - g: { - type: 'Point', - coordinates: [10, 20] - }, - id4: 3 - }, + type: 'Point', + coordinates: [10, 20] + } + ], + [ + 0, + null, + new Date('2020-12-31 23:59:59'), + new Date('2018-01-21 11:30:20.123456'), { - id: 1, - id2: 0, - id3: 2, - t: null, - d: new Date('2020-12-31 23:59:59'), - d2: new Date('2018-01-21 11:30:20.123456'), - g: { - type: 'Point', - coordinates: [20, 20] - }, - id4: 3 + type: 'Point', + coordinates: [20, 20] } - ]); - conn.query('DROP TABLE simpleBatch', (err, res) => { - clearTimeout(timeout); - conn.end(() => { - done(); + ] + ], + (err, 
res) => { + if (err) return done(err); + + assert.equal(res.affectedRows, 4); + conn.query('select * from `simpleBatch`', (err, res) => { + if (err) return done(err); + assert.deepEqual(res, [ + { + id: 1, + id2: 1, + id3: 2, + t: 'john😎🌶\\\\', + d: new Date('2001-12-31 23:59:58'), + d2: new Date('2018-01-01 12:30:20.456789'), + g: { + type: 'Point', + coordinates: [10, 10] + }, + id4: 3 + }, + { + id: 1, + id2: 1, + id3: 2, + t: 'blabla', + d: new Date('2001-12-31 23:59:58'), + d2: new Date('2018-01-01 12:30:20.456789'), + g: { + type: 'Point', + coordinates: [10, 10] + }, + id4: 3 + }, + { + id: 1, + id2: 0, + id3: 2, + t: '{"name":"jackमस्","val":"tt"}', + d: null, + d2: new Date('2018-01-21 11:30:20.123456'), + g: { + type: 'Point', + coordinates: [10, 20] + }, + id4: 3 + }, + { + id: 1, + id2: 0, + id3: 2, + t: null, + d: new Date('2020-12-31 23:59:59'), + d2: new Date('2018-01-21 11:30:20.123456'), + g: { + type: 'Point', + coordinates: [20, 20] + }, + id4: 3 + } + ]); + conn.query('DROP TABLE simpleBatch', (err, res) => { + clearTimeout(timeout); + conn.end(() => { + done(); + }); }); }); - }); - } - ); + } + ); + }); conn.query('select 1', (err, rows) => { if (err) return done(err); assert.deepEqual(rows, [{ 1: 1 }]); @@ -227,49 +229,52 @@ describe('batch callback', () => { conn.query('DROP TABLE IF EXISTS simpleBatchWithOptions'); conn.query('CREATE TABLE simpleBatchWithOptions(id int, d datetime)'); conn.query('FLUSH TABLES'); - const f = {}; - f.toSqlString = () => { - return 'blabla'; - }; - conn.batch( - { - sql: 'INSERT INTO `simpleBatchWithOptions` values (?, ?)', - maxAllowedPacket: 1048576 - }, - [ - [1, new Date('2001-12-31 23:59:58')], - [2, new Date('2001-12-31 23:59:58')] - ], - (err, res) => { - if (err) { - return conn.end(() => { - done(err); - }); - } + conn.beginTransaction(() => { + const f = {}; + f.toSqlString = () => { + return 'blabla'; + }; + conn.batch( + { + sql: 'INSERT INTO `simpleBatchWithOptions` values (?, ?)', + maxAllowedPacket: 1048576 + }, + [ + [1, new Date('2001-12-31 23:59:58')], + [2, new Date('2001-12-31 23:59:58')] + ], + (err, res) => { + if (err) { + return conn.end(() => { + done(err); + }); + } - assert.equal(res.affectedRows, 2); - conn.query('select * from `simpleBatchWithOptions`', (err, res) => { - if (err) return done(err); - assert.deepEqual(res, [ - { - id: 1, - d: new Date('2001-12-31 23:59:58') - }, - { - id: 2, - d: new Date('2001-12-31 23:59:58') - } - ]); - conn.query('DROP TABLE simpleBatchWithOptions', (err, res) => { + assert.equal(res.affectedRows, 2); + conn.query('select * from `simpleBatchWithOptions`', (err, res) => { if (err) return done(err); - clearTimeout(timeout); - conn.end(() => { - done(); + assert.deepEqual(res, [ + { + id: 1, + d: new Date('2001-12-31 23:59:58') + }, + { + id: 2, + d: new Date('2001-12-31 23:59:58') + } + ]); + conn.query('DROP TABLE simpleBatchWithOptions', (err, res) => { + if (err) return done(err); + clearTimeout(timeout); + conn.end(() => { + done(); + }); }); }); - }); - } - ); + } + ); + }); + conn.query('select 1', (err, rows) => { if (err) { return conn.end(() => { @@ -296,34 +301,37 @@ describe('batch callback', () => { conn.query('DROP TABLE IF EXISTS simpleBatchCP1251'); conn.query('CREATE TABLE simpleBatchCP1251(t varchar(128), id int) CHARSET utf8mb4'); conn.query('FLUSH TABLES'); - conn.batch( - 'INSERT INTO `simpleBatchCP1251` values (?, ?)', - [ - ['john', 2], - ['©°', 3] - ], - (err, res) => { - assert.equal(res.affectedRows, 2); - conn.query('select * from 
`simpleBatchCP1251`', (err, res) => { - if (err) { - return conn.end(() => { - done(err); - }); - } - assert.deepEqual(res, [ - { id: 2, t: 'john' }, - { id: 3, t: '©°' } - ]); - conn.query('DROP TABLE simpleBatchCP1251', (err, res) => { - if (err) return done(err); - clearTimeout(timeout); - conn.end(() => { - done(); + conn.beginTransaction(() => { + conn.batch( + 'INSERT INTO `simpleBatchCP1251` values (?, ?)', + [ + ['john', 2], + ['©°', 3] + ], + (err, res) => { + assert.equal(res.affectedRows, 2); + conn.query('select * from `simpleBatchCP1251`', (err, res) => { + if (err) { + return conn.end(() => { + done(err); + }); + } + assert.deepEqual(res, [ + { id: 2, t: 'john' }, + { id: 3, t: '©°' } + ]); + conn.query('DROP TABLE simpleBatchCP1251', (err, res) => { + if (err) return done(err); + clearTimeout(timeout); + conn.end(() => { + done(); + }); }); }); - }); - } - ); + } + ); + }); + conn.query('select 2', (err, rows) => { if (err) { return conn.end(() => { @@ -390,67 +398,70 @@ describe('batch callback', () => { 'CREATE TABLE simpleBatch(id int, id2 boolean, id3 int, t varchar(8), d datetime, d2 datetime(6), g POINT, id4 int) CHARSET utf8mb4' ); conn.query('FLUSH TABLES', (err) => { - conn.batch( - 'INSERT INTO `simpleBatch` values (1, ?, 2, ?, ?, ?, ?, 3)', - [ - [ - true, - 'john', - new Date('2001-12-31 23:59:58'), - new Date('2018-01-01 12:30:20.456789'), - { - type: 'Point', - coordinates: [10, 10] - } - ], + conn.beginTransaction(() => { + conn.batch( + 'INSERT INTO `simpleBatch` values (1, ?, 2, ?, ?, ?, ?, 3)', [ - false, - '12345678901', - null, - new Date('2018-01-21 11:30:20.123456'), - { - type: 'Point', - coordinates: [10, 20] - } + [ + true, + 'john', + new Date('2001-12-31 23:59:58'), + new Date('2018-01-01 12:30:20.456789'), + { + type: 'Point', + coordinates: [10, 10] + } + ], + [ + false, + '12345678901', + null, + new Date('2018-01-21 11:30:20.123456'), + { + type: 'Point', + coordinates: [10, 20] + } + ], + [ + 0, + null, + new Date('2020-12-31 23:59:59'), + new Date('2018-01-21 11:30:20.123456'), + { + type: 'Point', + coordinates: [20, 20] + } + ] ], - [ - 0, - null, - new Date('2020-12-31 23:59:59'), - new Date('2018-01-21 11:30:20.123456'), - { - type: 'Point', - coordinates: [20, 20] - } - ] - ], - (err, res) => { - if (err) { - assert.isTrue( - err.message.includes("Data too long for column 't' at row 2"), - err.message - ); - conn.query('DROP TABLE simpleBatch', (err, res) => { - clearTimeout(timeout); + (err, res) => { + if (err) { + assert.isTrue( + err.message.includes("Data too long for column 't' at row 2"), + err.message + ); + conn.query('DROP TABLE simpleBatch', (err, res) => { + clearTimeout(timeout); + conn.end(() => { + done(); + }); + }); + } else { conn.end(() => { - done(); + if ( + (shareConn.info.isMariaDB() && shareConn.info.hasMinVersion(10, 2, 0)) || + (!shareConn.info.isMariaDB() && shareConn.info.hasMinVersion(5, 7, 0)) + ) { + //field truncated must have thrown error + done(new Error('must have throw error !')); + } else { + done(); + } }); - }); - } else { - conn.end(() => { - if ( - (shareConn.info.isMariaDB() && shareConn.info.hasMinVersion(10, 2, 0)) || - (!shareConn.info.isMariaDB() && shareConn.info.hasMinVersion(5, 7, 0)) - ) { - //field truncated must have thrown error - done(new Error('must have throw error !')); - } else { - done(); - } - }); + } } - } - ); + ); + }); + conn.query('select 1', (err, rows) => { if (err) { return conn.end(() => { @@ -577,51 +588,53 @@ describe('batch callback', () => { 'CREATE TABLE 
batchWithStream(id int, id2 int, id3 int, t varchar(128), id4 int, id5 int) CHARSET utf8mb4' ); conn.query('FLUSH TABLES', (err) => { - conn.batch( - 'INSERT INTO `batchWithStream` values (1, ?, 2, ?, ?, 3)', - [ - [1, stream1, 99], - [2, stream2, 98] - ], - (err, res) => { - if (err) { - return conn.end(() => { - done(err); - }); - } - assert.equal(res.affectedRows, 2); - conn.query('select * from `batchWithStream`', (err, res) => { + conn.beginTransaction(() => { + conn.batch( + 'INSERT INTO `batchWithStream` values (1, ?, 2, ?, ?, 3)', + [ + [1, stream1, 99], + [2, stream2, 98] + ], + (err, res) => { if (err) { return conn.end(() => { done(err); }); } - assert.deepEqual(res, [ - { - id: 1, - id2: 1, - id3: 2, - t: 'abcdefghijkflmnopqrtuvwxyz🤘💪', - id4: 99, - id5: 3 - }, - { - id: 1, - id2: 2, - id3: 2, - t: 'abcdefghijkflmnopqrtuvwxyz🤘💪', - id4: 98, - id5: 3 + assert.equal(res.affectedRows, 2); + conn.query('select * from `batchWithStream`', (err, res) => { + if (err) { + return conn.end(() => { + done(err); + }); } - ]); - conn.query('DROP TABLE batchWithStream'); - clearTimeout(timeout); - conn.end(() => { - done(); + assert.deepEqual(res, [ + { + id: 1, + id2: 1, + id3: 2, + t: 'abcdefghijkflmnopqrtuvwxyz🤘💪', + id4: 99, + id5: 3 + }, + { + id: 1, + id2: 2, + id3: 2, + t: 'abcdefghijkflmnopqrtuvwxyz🤘💪', + id4: 98, + id5: 3 + } + ]); + conn.query('DROP TABLE batchWithStream'); + clearTimeout(timeout); + conn.end(() => { + done(); + }); }); - }); - } - ); + } + ); + }); }); }); }; @@ -686,50 +699,52 @@ describe('batch callback', () => { 'CREATE TABLE simpleNamedPlaceHolders(id int, id2 int, id3 int, t varchar(128), id4 int) CHARSET utf8mb4' ); conn.query('FLUSH TABLES', (err) => { - conn.batch( - 'INSERT INTO `simpleNamedPlaceHolders` values (1, :param_1, 2, :param_2, 3)', - [ - { param_1: 1, param_2: 'john' }, - { param_1: 2, param_2: 'jack' } - ], - (err, res) => { - if (err) { - return conn.end(() => { - done(err); - }); - } - assert.equal(res.affectedRows, 2); - conn.query('select * from `simpleNamedPlaceHolders`', (err, res) => { + conn.beginTransaction(() => { + conn.batch( + 'INSERT INTO `simpleNamedPlaceHolders` values (1, :param_1, 2, :param_2, 3)', + [ + { param_1: 1, param_2: 'john' }, + { param_1: 2, param_2: 'jack' } + ], + (err, res) => { if (err) { return conn.end(() => { done(err); }); } - assert.deepEqual(res, [ - { - id: 1, - id2: 1, - id3: 2, - t: 'john', - id4: 3 - }, - { - id: 1, - id2: 2, - id3: 2, - t: 'jack', - id4: 3 + assert.equal(res.affectedRows, 2); + conn.query('select * from `simpleNamedPlaceHolders`', (err, res) => { + if (err) { + return conn.end(() => { + done(err); + }); } - ]); - conn.query('DROP TABLE simpleNamedPlaceHolders', () => { - clearTimeout(timeout); - return conn.end(() => { - done(); + assert.deepEqual(res, [ + { + id: 1, + id2: 1, + id3: 2, + t: 'john', + id4: 3 + }, + { + id: 1, + id2: 2, + id3: 2, + t: 'jack', + id4: 3 + } + ]); + conn.query('DROP TABLE simpleNamedPlaceHolders', () => { + clearTimeout(timeout); + return conn.end(() => { + done(); + }); }); }); - }); - } - ); + } + ); + }); }); }); }; @@ -853,49 +868,51 @@ describe('batch callback', () => { 'CREATE TABLE streamNamedPlaceHolders(id int, id2 int, id3 int, t varchar(128), id4 int, id5 int) CHARSET utf8mb4' ); conn.query('FLUSH TABLES', (err) => { - conn.batch( - 'INSERT INTO `streamNamedPlaceHolders` values (1, :id1, 2, :id3, :id7, 3)', - [ - { id1: 1, id3: stream1, id4: 99, id5: 6 }, - { id1: 2, id3: stream2, id4: 98 } - ], - (err, res) => { - if (err) { - conn.end(); - 
return done(err); - } - assert.equal(res.affectedRows, 2); - conn.query('select * from `streamNamedPlaceHolders`', (err, res) => { + conn.beginTransaction(() => { + conn.batch( + 'INSERT INTO `streamNamedPlaceHolders` values (1, :id1, 2, :id3, :id7, 3)', + [ + { id1: 1, id3: stream1, id4: 99, id5: 6 }, + { id1: 2, id3: stream2, id4: 98 } + ], + (err, res) => { if (err) { conn.end(); return done(err); } - assert.deepEqual(res, [ - { - id: 1, - id2: 1, - id3: 2, - t: 'abcdefghijkflmnopqrtuvwxyz🤘💪', - id4: null, - id5: 3 - }, - { - id: 1, - id2: 2, - id3: 2, - t: 'abcdefghijkflmnopqrtuvwxyz🤘💪', - id4: null, - id5: 3 + assert.equal(res.affectedRows, 2); + conn.query('select * from `streamNamedPlaceHolders`', (err, res) => { + if (err) { + conn.end(); + return done(err); } - ]); - conn.query('DROP TABLE streamNamedPlaceHolders'); - clearTimeout(timeout); - conn.end(() => { - done(); + assert.deepEqual(res, [ + { + id: 1, + id2: 1, + id3: 2, + t: 'abcdefghijkflmnopqrtuvwxyz🤘💪', + id4: null, + id5: 3 + }, + { + id: 1, + id2: 2, + id3: 2, + t: 'abcdefghijkflmnopqrtuvwxyz🤘💪', + id4: null, + id5: 3 + } + ]); + conn.query('DROP TABLE streamNamedPlaceHolders'); + clearTimeout(timeout); + conn.end(() => { + done(); + }); }); - }); - } - ); + } + ); + }); }); }); }; @@ -945,7 +962,6 @@ describe('batch callback', () => { describe('standard question mark using bulk', () => { const useCompression = false; it('simple batch, local date', function (done) { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!base.utf8Collation()) this.skip(); this.timeout(30000); if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); @@ -953,14 +969,12 @@ describe('batch callback', () => { }); it('simple batch with option', function (done) { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); this.timeout(30000); if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); simpleBatchWithOptions(useCompression, true, done); }); it('batch without parameter', function (done) { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); base.createConnection({ compress: useCompression, bulk: true }).then((conn) => { conn @@ -978,7 +992,6 @@ describe('batch callback', () => { }); it('batch with erroneous parameter', function (done) { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); base.createConnection({ compress: useCompression, bulk: true }).then((conn) => { conn @@ -1001,7 +1014,6 @@ describe('batch callback', () => { }); it('simple batch offset date', function (done) { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!base.utf8Collation()) this.skip(); this.timeout(30000); if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); @@ -1009,46 +1021,45 @@ describe('batch callback', () => { }); it('simple batch encoding CP1251', function (done) { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); this.timeout(30000); simpleBatchEncodingCP1251(useCompression, true, 'local', done); }); it('simple batch error message ', function (done) { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); + if (process.env.SKYSQL_HA) { + // due to https://jira.mariadb.org/browse/MXS-3196 + this.skip(); + return; + } this.timeout(30000); simpleBatchErrorMsg(useCompression, true, done); }); 
it('simple batch error message packet split', function (done) { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); this.timeout(30000); if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); simpleBatchErrorSplit(useCompression, true, 'local', done); }); it('non rewritable batch', function (done) { - if (process.env.SKYSQL || process.env.SKYSQL_HA || !supportBulk) this.skip(); + if (!supportBulk) this.skip(); this.timeout(30000); nonRewritableBatch(useCompression, true, done); }); it('16M+ error batch', function (done) { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (maxAllowedSize <= testSize) this.skip(); this.timeout(360000); bigBatchError(useCompression, true, done); }); it('batch with streams', function (done) { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!base.utf8Collation()) this.skip(); this.timeout(30000); batchWithStream(useCompression, true, done); }); it('batch error with streams', function (done) { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); this.timeout(30000); batchErrorWithStream(useCompression, true, done); }); @@ -1058,7 +1069,6 @@ describe('batch callback', () => { const useCompression = true; it('simple batch, local date', function (done) { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!base.utf8Collation()) this.skip(); this.timeout(30000); if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); @@ -1066,7 +1076,6 @@ describe('batch callback', () => { }); it('simple batch offset date', function (done) { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!base.utf8Collation()) this.skip(); this.timeout(30000); if (!shareConn.info.isMariaDB() && !shareConn.info.hasMinVersion(5, 6, 0)) this.skip(); @@ -1074,33 +1083,34 @@ describe('batch callback', () => { }); it('simple batch error message ', function (done) { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); + if (process.env.SKYSQL_HA) { + // due to https://jira.mariadb.org/browse/MXS-3196 + this.skip(); + return; + } this.timeout(30000); simpleBatchErrorMsg(useCompression, true, done); }); it('non rewritable batch', function (done) { - if (process.env.SKYSQL || process.env.SKYSQL_HA || !supportBulk) this.skip(); + if (!supportBulk) this.skip(); this.timeout(30000); nonRewritableBatch(useCompression, true, done); }); it('16M+ error batch', function (done) { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (maxAllowedSize <= testSize) this.skip(); this.timeout(360000); bigBatchError(useCompression, true, done); }); it('batch with streams', function (done) { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!base.utf8Collation()) this.skip(); this.timeout(30000); batchWithStream(useCompression, true, done); }); it('batch error with streams', function (done) { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); this.timeout(30000); batchErrorWithStream(useCompression, true, done); }); @@ -1202,6 +1212,11 @@ describe('batch callback', () => { }); it('simple batch error message ', function (done) { + if (process.env.SKYSQL_HA) { + // due to https://jira.mariadb.org/browse/MXS-3196 + this.skip(); + return; + } this.timeout(30000); simpleBatchErrorMsg(useCompression, false, done); }); @@ -1241,6 +1256,11 @@ describe('batch callback', () => { }); it('simple batch error message ', function (done) { + if (process.env.SKYSQL_HA) { + // due to https://jira.mariadb.org/browse/MXS-3196 + 
this.skip(); + return; + } this.timeout(30000); simpleBatchErrorMsg(useCompression, false, done); }); @@ -1264,32 +1284,33 @@ describe('batch callback', () => { describe('named parameter with bulk', () => { it('simple batch', function (done) { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); this.timeout(30000); simpleNamedPlaceHolders(true, done); }); it('simple batch error', function (done) { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); + if (process.env.SKYSQL_HA) { + // due to https://jira.mariadb.org/browse/MXS-3196 + this.skip(); + return; + } this.timeout(30000); simpleNamedPlaceHoldersErr(true, done); }); it('non rewritable batch', function (done) { - if (process.env.SKYSQL || process.env.SKYSQL_HA || !supportBulk) this.skip(); + if (!supportBulk) this.skip(); this.timeout(30000); nonRewritableHoldersErr(true, done); }); it('batch with streams', function (done) { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); if (!base.utf8Collation()) this.skip(); this.timeout(30000); streamNamedPlaceHolders(true, done); }); it('batch error with streams', function (done) { - if (process.env.SKYSQL || process.env.SKYSQL_HA) this.skip(); this.timeout(30000); streamErrorNamedPlaceHolders(true, done); }); @@ -1302,6 +1323,11 @@ describe('batch callback', () => { }); it('simple batch error', function (done) { + if (process.env.SKYSQL_HA) { + // due to https://jira.mariadb.org/browse/MXS-3196 + this.skip(); + return; + } this.timeout(30000); simpleNamedPlaceHoldersErr(false, done); }); diff --git a/test/integration/test-batch-geometry-type.js b/test/integration/test-batch-geometry-type.js index 5fab0bf7..d8a2b72d 100644 --- a/test/integration/test-batch-geometry-type.js +++ b/test/integration/test-batch-geometry-type.js @@ -13,942 +13,871 @@ describe('batch geometry type', () => { : false; }); - it('Point format', function (done) { + it('Point format', async function () { if (!shareConn.info.isMariaDB()) this.skip(); - shareConn - .query('DROP TABLE IF EXISTS gis_point_batch') - .then(() => { - return shareConn.query('CREATE TABLE gis_point_batch (g POINT)'); - }) - .then(() => { - return shareConn.query('FLUSH TABLES'); - }) - .then(() => { - return shareConn.batch('INSERT INTO gis_point_batch VALUES (?)', [ - [ - { - type: 'Point', - coordinates: [10, 10] - } - ], - [ - { - type: 'Point', - coordinates: [20, 10] - } - ], - [ - { - type: 'Point', - coordinates: [20, 20] - } - ], - [ - { - type: 'Point', - coordinates: [10, 20] - } - ], - [ - { - type: 'Point', - coordinates: [] - } - ], - [ - { - type: 'Point' - } - ] - ]); - }) - .then(() => { - return shareConn.query('SELECT * FROM gis_point_batch'); - }) - .then((rows) => { - assert.deepEqual(rows, [ - { - g: { - type: 'Point', - coordinates: [10, 10] - } - }, - { - g: { - type: 'Point', - coordinates: [20, 10] - } - }, - { - g: { - type: 'Point', - coordinates: [20, 20] - } - }, - { - g: { - type: 'Point', - coordinates: [10, 20] - } - }, - { - g: - shareConn.info.isMariaDB() && - shareConn.info.hasMinVersion(10, 5, 2) && - !process.env.MAXSCALE_TEST_DISABLE - ? { type: 'Point' } - : null - }, - { - g: - shareConn.info.isMariaDB() && - shareConn.info.hasMinVersion(10, 5, 2) && - !process.env.MAXSCALE_TEST_DISABLE - ? 
{ type: 'Point' } - : null - } - ]); - done(); - }) - .catch(done); + await shareConn.query('DROP TABLE IF EXISTS gis_point_batch'); + await shareConn.query('CREATE TABLE gis_point_batch (g POINT)'); + await shareConn.query('FLUSH TABLES'); + await shareConn.beginTransaction(); + await shareConn.batch('INSERT INTO gis_point_batch VALUES (?)', [ + [ + { + type: 'Point', + coordinates: [10, 10] + } + ], + [ + { + type: 'Point', + coordinates: [20, 10] + } + ], + [ + { + type: 'Point', + coordinates: [20, 20] + } + ], + [ + { + type: 'Point', + coordinates: [10, 20] + } + ], + [ + { + type: 'Point', + coordinates: [] + } + ], + [ + { + type: 'Point' + } + ] + ]); + const rows = await shareConn.query('SELECT * FROM gis_point_batch'); + assert.deepEqual(rows, [ + { + g: { + type: 'Point', + coordinates: [10, 10] + } + }, + { + g: { + type: 'Point', + coordinates: [20, 10] + } + }, + { + g: { + type: 'Point', + coordinates: [20, 20] + } + }, + { + g: { + type: 'Point', + coordinates: [10, 20] + } + }, + { + g: + shareConn.info.isMariaDB() && + shareConn.info.hasMinVersion(10, 5, 2) && + !process.env.MAXSCALE_TEST_DISABLE + ? { type: 'Point' } + : null + }, + { + g: + shareConn.info.isMariaDB() && + shareConn.info.hasMinVersion(10, 5, 2) && + !process.env.MAXSCALE_TEST_DISABLE + ? { type: 'Point' } + : null + } + ]); + shareConn.commit(); }); - it('LineString insert', function (done) { + it('LineString insert', async function () { if (!shareConn.info.isMariaDB()) this.skip(); - shareConn - .query('DROP TABLE IF EXISTS gis_line_batch') - .then(() => { - return shareConn.query('CREATE TABLE gis_line_batch (g LINESTRING)'); - }) - .then(() => { - return shareConn.query('FLUSH TABLES'); - }) - .then(() => { - return shareConn.batch('INSERT INTO gis_line_batch VALUES (?)', [ - [ - { - type: 'LineString', - coordinates: [ - [0, 0], - [0, 10], - [10, 0] - ] - } - ], - [ - { - type: 'LineString', - coordinates: [[0, 10]] - } - ], - [ - { - type: 'LineString', - coordinates: [] - } - ], - [ - { - type: 'LineString' - } + await shareConn.query('DROP TABLE IF EXISTS gis_line_batch'); + await shareConn.query('CREATE TABLE gis_line_batch (g LINESTRING)'); + await shareConn.query('FLUSH TABLES'); + await shareConn.beginTransaction(); + await shareConn.batch('INSERT INTO gis_line_batch VALUES (?)', [ + [ + { + type: 'LineString', + coordinates: [ + [0, 0], + [0, 10], + [10, 0] ] - ]); - }) - .then(() => { - return shareConn.query('SELECT * FROM gis_line_batch'); - }) - .then((rows) => { - if (shareConn.info.isMariaDB() && shareConn.info.hasMinVersion(10, 2, 0)) { - assert.deepEqual(rows, [ - { - g: { - type: 'LineString', - coordinates: [ - [0, 0], - [0, 10], - [10, 0] - ] - } - }, - { - g: { - type: 'LineString', - coordinates: [[0, 10]] - } - }, - { - g: supportBulk - ? { - coordinates: [], - type: 'LineString' - } - : shareConn.info.hasMinVersion(10, 5, 2) && !process.env.MAXSCALE_TEST_DISABLE - ? { type: 'LineString' } - : null - }, - { - g: - shareConn.info.hasMinVersion(10, 5, 2) && !process.env.MAXSCALE_TEST_DISABLE - ? 
{ type: 'LineString' } - : null - } - ]); - } else { - assert.deepEqual(rows, [ - { - g: { - type: 'LineString', - coordinates: [ - [0, 0], - [0, 10], - [10, 0] - ] - } - }, - { - g: { - type: 'LineString', - coordinates: [[0, 10]] + } + ], + [ + { + type: 'LineString', + coordinates: [[0, 10]] + } + ], + [ + { + type: 'LineString', + coordinates: [] + } + ], + [ + { + type: 'LineString' + } + ] + ]); + const rows = await shareConn.query('SELECT * FROM gis_line_batch'); + if (shareConn.info.isMariaDB() && shareConn.info.hasMinVersion(10, 2, 0)) { + assert.deepEqual(rows, [ + { + g: { + type: 'LineString', + coordinates: [ + [0, 0], + [0, 10], + [10, 0] + ] + } + }, + { + g: { + type: 'LineString', + coordinates: [[0, 10]] + } + }, + { + g: supportBulk + ? { + coordinates: [], + type: 'LineString' } - }, - { - g: null - }, - { - g: null - } - ]); + : shareConn.info.hasMinVersion(10, 5, 2) && !process.env.MAXSCALE_TEST_DISABLE + ? { type: 'LineString' } + : null + }, + { + g: + shareConn.info.hasMinVersion(10, 5, 2) && !process.env.MAXSCALE_TEST_DISABLE + ? { type: 'LineString' } + : null + } + ]); + } else { + assert.deepEqual(rows, [ + { + g: { + type: 'LineString', + coordinates: [ + [0, 0], + [0, 10], + [10, 0] + ] + } + }, + { + g: { + type: 'LineString', + coordinates: [[0, 10]] + } + }, + { + g: null + }, + { + g: null } - done(); - }) - .catch(done); + ]); + } + shareConn.commit(); }); - it('Polygon insert', function (done) { + it('Polygon insert', async function () { if (!shareConn.info.isMariaDB()) this.skip(); - shareConn - .query('DROP TABLE IF EXISTS gis_polygon_batch') - .then(() => { - return shareConn.query('CREATE TABLE gis_polygon_batch (g POLYGON)'); - }) - .then(() => { - return shareConn.query('FLUSH TABLES'); - }) - - .then(() => { - return shareConn.batch('INSERT INTO gis_polygon_batch VALUES (?)', [ - [ - { - type: 'Polygon', - coordinates: [ - [ - [10, 10], - [20, 10], - [20, 20], - [10, 20], - [10, 10] - ] - ] - } - ], - [ - { - type: 'Polygon', - coordinates: [ - [ - [0, 0], - [50, 0], - [50, 50], - [0, 50], - [0, 0] - ], - [ - [10, 10], - [20, 10], - [20, 20], - [10, 20], - [10, 10] - ] + await shareConn.query('DROP TABLE IF EXISTS gis_polygon_batch'); + await shareConn.query('CREATE TABLE gis_polygon_batch (g POLYGON)'); + await shareConn.query('FLUSH TABLES'); + await shareConn.beginTransaction(); + await shareConn.batch('INSERT INTO gis_polygon_batch VALUES (?)', [ + [ + { + type: 'Polygon', + coordinates: [ + [ + [10, 10], + [20, 10], + [20, 20], + [10, 20], + [10, 10] + ] + ] + } + ], + [ + { + type: 'Polygon', + coordinates: [ + [ + [0, 0], + [50, 0], + [50, 50], + [0, 50], + [0, 0] + ], + [ + [10, 10], + [20, 10], + [20, 20], + [10, 20], + [10, 10] + ] + ] + } + ], + [ + { + type: 'Polygon', + coordinates: [ + [[0, 0], [50]], + [ + [10, 10], + [20, 10] + ] + ] + } + ], + [ + { + type: 'Polygon', + coordinates: [] + } + ], + [ + { + type: 'Polygon' + } + ] + ]); + const rows = await shareConn.query('SELECT * FROM gis_polygon_batch'); + if (shareConn.info.isMariaDB() && shareConn.info.hasMinVersion(10, 2, 0)) { + assert.deepEqual(rows, [ + { + g: { + type: 'Polygon', + coordinates: [ + [ + [10, 10], + [20, 10], + [20, 20], + [10, 20], + [10, 10] ] - } - ], - [ - { - type: 'Polygon', - coordinates: [ - [[0, 0], [50]], - [ - [10, 10], - [20, 10] - ] + ] + } + }, + { + g: { + type: 'Polygon', + coordinates: [ + [ + [0, 0], + [50, 0], + [50, 50], + [0, 50], + [0, 0] + ], + [ + [10, 10], + [20, 10], + [20, 20], + [10, 20], + [10, 10] ] - } - ], - [ - { - type: 
'Polygon', - coordinates: [] - } - ], - [ - { - type: 'Polygon' - } - ] - ]); - }) - .then(() => { - return shareConn.query('SELECT * FROM gis_polygon_batch'); - }) - .then((rows) => { - if (shareConn.info.isMariaDB() && shareConn.info.hasMinVersion(10, 2, 0)) { - assert.deepEqual(rows, [ - { - g: { - type: 'Polygon', - coordinates: [ - [ - [10, 10], - [20, 10], - [20, 20], - [10, 20], - [10, 10] - ] - ] - } - }, - { - g: { - type: 'Polygon', - coordinates: [ - [ - [0, 0], - [50, 0], - [50, 50], - [0, 50], - [0, 0] - ], - [ - [10, 10], - [20, 10], - [20, 20], - [10, 20], - [10, 10] - ] - ] - } - }, - { - g: - shareConn.info.hasMinVersion(10, 5, 2) && !process.env.MAXSCALE_TEST_DISABLE - ? { type: 'Polygon' } - : null - }, - { - g: supportBulk - ? { - type: 'Polygon', - coordinates: [] - } - : shareConn.info.hasMinVersion(10, 5, 2) && !process.env.MAXSCALE_TEST_DISABLE - ? { type: 'Polygon' } - : null - }, - { - g: - shareConn.info.hasMinVersion(10, 5, 2) && !process.env.MAXSCALE_TEST_DISABLE - ? { type: 'Polygon' } - : null - } - ]); - } else { - assert.deepEqual(rows, [ - { - g: { - type: 'Polygon', - coordinates: [ - [ - [10, 10], - [20, 10], - [20, 20], - [10, 20], - [10, 10] - ] - ] - } - }, - { - g: { + ] + } + }, + { + g: + shareConn.info.hasMinVersion(10, 5, 2) && !process.env.MAXSCALE_TEST_DISABLE + ? { type: 'Polygon' } + : null + }, + { + g: supportBulk + ? { type: 'Polygon', - coordinates: [ - [ - [0, 0], - [50, 0], - [50, 50], - [0, 50], - [0, 0] - ], - [ - [10, 10], - [20, 10], - [20, 20], - [10, 20], - [10, 10] - ] - ] + coordinates: [] } - }, - { - g: null - }, - { - g: null - }, - { - g: null - } - ]); + : shareConn.info.hasMinVersion(10, 5, 2) && !process.env.MAXSCALE_TEST_DISABLE + ? { type: 'Polygon' } + : null + }, + { + g: + shareConn.info.hasMinVersion(10, 5, 2) && !process.env.MAXSCALE_TEST_DISABLE + ? { type: 'Polygon' } + : null } - done(); - }) - .catch(done); - }); - - it('MultiPoint insert', function (done) { - if (!shareConn.info.isMariaDB()) this.skip(); - shareConn - .query('DROP TABLE IF EXISTS gis_multi_point_batch') - .then(() => { - return shareConn.query('CREATE TABLE gis_multi_point_batch (g MULTIPOINT)'); - }) - .then(() => { - return shareConn.query('FLUSH TABLES'); - }) - - .then(() => { - return shareConn.batch('INSERT INTO gis_multi_point_batch VALUES (?)', [ - [ - { - type: 'MultiPoint', - coordinates: [ - [30, 30], + ]); + } else { + assert.deepEqual(rows, [ + { + g: { + type: 'Polygon', + coordinates: [ + [ [10, 10], + [20, 10], + [20, 20], [10, 20], - [20, 20] + [10, 10] ] - } - ], - [{ type: 'MultiPoint', coordinates: [[10, 0]] }], - [{ type: 'MultiPoint', coordinates: [] }], - [{ type: 'MultiPoint' }] - ]); - }) - .then(() => { - return shareConn.query('SELECT * FROM gis_multi_point_batch'); - }) - .then((rows) => { - if (shareConn.info.isMariaDB() && shareConn.info.hasMinVersion(10, 2, 0)) { - assert.deepEqual(rows, [ - { - g: { - type: 'MultiPoint', - coordinates: [ - [30, 30], - [10, 10], - [10, 20], - [20, 20] - ] - } - }, - { - g: { - type: 'MultiPoint', - coordinates: [[10, 0]] - } - }, - { - g: supportBulk - ? { - type: 'MultiPoint', - coordinates: [] - } - : shareConn.info.hasMinVersion(10, 5, 2) && !process.env.MAXSCALE_TEST_DISABLE - ? { type: 'MultiPoint' } - : null - }, - { - g: supportBulk - ? { - type: 'MultiPoint', - coordinates: [] - } - : shareConn.info.hasMinVersion(10, 5, 2) && !process.env.MAXSCALE_TEST_DISABLE - ? 
{ type: 'MultiPoint' } - : null - } - ]); - } else { - assert.deepEqual(rows, [ - { - g: { + ] + } + }, + { + g: { + type: 'Polygon', + coordinates: [ + [ + [0, 0], + [50, 0], + [50, 50], + [0, 50], + [0, 0] + ], + [ + [10, 10], + [20, 10], + [20, 20], + [10, 20], + [10, 10] + ] + ] + } + }, + { + g: null + }, + { + g: null + }, + { + g: null + } + ]); + } + shareConn.commit(); + }); + + it('MultiPoint insert', async function () { + if (!shareConn.info.isMariaDB()) this.skip(); + await shareConn.query('DROP TABLE IF EXISTS gis_multi_point_batch'); + await shareConn.query('CREATE TABLE gis_multi_point_batch (g MULTIPOINT)'); + await shareConn.query('FLUSH TABLES'); + await shareConn.beginTransaction(); + await shareConn.batch('INSERT INTO gis_multi_point_batch VALUES (?)', [ + [ + { + type: 'MultiPoint', + coordinates: [ + [30, 30], + [10, 10], + [10, 20], + [20, 20] + ] + } + ], + [{ type: 'MultiPoint', coordinates: [[10, 0]] }], + [{ type: 'MultiPoint', coordinates: [] }], + [{ type: 'MultiPoint' }] + ]); + const rows = await shareConn.query('SELECT * FROM gis_multi_point_batch'); + if (shareConn.info.isMariaDB() && shareConn.info.hasMinVersion(10, 2, 0)) { + assert.deepEqual(rows, [ + { + g: { + type: 'MultiPoint', + coordinates: [ + [30, 30], + [10, 10], + [10, 20], + [20, 20] + ] + } + }, + { + g: { + type: 'MultiPoint', + coordinates: [[10, 0]] + } + }, + { + g: supportBulk + ? { type: 'MultiPoint', - coordinates: [ - [30, 30], - [10, 10], - [10, 20], - [20, 20] - ] + coordinates: [] } - }, - { - g: { + : shareConn.info.hasMinVersion(10, 5, 2) && !process.env.MAXSCALE_TEST_DISABLE + ? { type: 'MultiPoint' } + : null + }, + { + g: supportBulk + ? { type: 'MultiPoint', - coordinates: [[10, 0]] + coordinates: [] } - }, - { - g: null - }, - { - g: null - } - ]); + : shareConn.info.hasMinVersion(10, 5, 2) && !process.env.MAXSCALE_TEST_DISABLE + ? 
{ type: 'MultiPoint' } + : null + } + ]); + } else { + assert.deepEqual(rows, [ + { + g: { + type: 'MultiPoint', + coordinates: [ + [30, 30], + [10, 10], + [10, 20], + [20, 20] + ] + } + }, + { + g: { + type: 'MultiPoint', + coordinates: [[10, 0]] + } + }, + { + g: null + }, + { + g: null + } + ]); + } + shareConn.commit(); + }); + + it('Multi-line insert', async function () { + if (!shareConn.info.isMariaDB()) this.skip(); + await shareConn.query('DROP TABLE IF EXISTS gis_multi_line_batch'); + await shareConn.query('CREATE TABLE gis_multi_line_batch (g MULTILINESTRING)'); + await shareConn.query('FLUSH TABLES'); + await shareConn.beginTransaction(); + await shareConn.batch('INSERT INTO gis_multi_line_batch VALUES (?)', [ + [ + { + type: 'MultiLineString', + coordinates: [ + [ + [10, 48], + [10, 21], + [10, 0] + ], + [ + [16, 0], + [16, 23], + [16, 48] + ] + ] } - done(); - }) - .catch(done); - }); - - it('Multi-line insert', function (done) { - if (!shareConn.info.isMariaDB()) this.skip(); - shareConn - .query('DROP TABLE IF EXISTS gis_multi_line_batch') - .then(() => { - return shareConn.query('CREATE TABLE gis_multi_line_batch (g MULTILINESTRING)'); - }) - .then(() => { - return shareConn.query('FLUSH TABLES'); - }) - - .then(() => { - return shareConn.batch('INSERT INTO gis_multi_line_batch VALUES (?)', [ - [ - { - type: 'MultiLineString', - coordinates: [ - [ - [10, 48], - [10, 21], - [10, 0] - ], - [ - [16, 0], - [16, 23], - [16, 48] - ] + ], + [ + { + type: 'MultiLineString', + coordinates: [ + [ + [10, 48], + [10, 21], + [10, 0] + ] + ] + } + ], + [{ type: 'MultiLineString', coordinates: [[]] }], + [{ type: 'MultiLineString', coordinates: [] }], + [{ type: 'MultiLineString' }] + ]); + const rows = await shareConn.query('SELECT * FROM gis_multi_line_batch'); + if (shareConn.info.isMariaDB() && shareConn.info.hasMinVersion(10, 2, 0)) { + assert.deepEqual(rows, [ + { + g: { + type: 'MultiLineString', + coordinates: [ + [ + [10, 48], + [10, 21], + [10, 0] + ], + [ + [16, 0], + [16, 23], + [16, 48] ] - } - ], - [ - { - type: 'MultiLineString', - coordinates: [ - [ - [10, 48], - [10, 21], - [10, 0] - ] + ] + } + }, + { + g: { + type: 'MultiLineString', + coordinates: [ + [ + [10, 48], + [10, 21], + [10, 0] ] - } - ], - [{ type: 'MultiLineString', coordinates: [[]] }], - [{ type: 'MultiLineString', coordinates: [] }], - [{ type: 'MultiLineString' }] - ]); - }) - - .then(() => { - return shareConn.query('SELECT * FROM gis_multi_line_batch'); - }) - .then((rows) => { - if (shareConn.info.isMariaDB() && shareConn.info.hasMinVersion(10, 2, 0)) { - assert.deepEqual(rows, [ - { - g: { - type: 'MultiLineString', - coordinates: [ - [ - [10, 48], - [10, 21], - [10, 0] - ], - [ - [16, 0], - [16, 23], - [16, 48] - ] - ] - } - }, - { - g: { + ] + } + }, + { + g: supportBulk + ? { type: 'MultiLineString', - coordinates: [ - [ - [10, 48], - [10, 21], - [10, 0] - ] - ] + coordinates: [[]] } - }, - { - g: supportBulk - ? { - type: 'MultiLineString', - coordinates: [[]] - } - : shareConn.info.hasMinVersion(10, 5, 2) && !process.env.MAXSCALE_TEST_DISABLE - ? { type: 'MultiLineString' } - : null - }, - { - g: supportBulk - ? { - type: 'MultiLineString', - coordinates: [] - } - : shareConn.info.hasMinVersion(10, 5, 2) && !process.env.MAXSCALE_TEST_DISABLE - ? { type: 'MultiLineString' } - : null - }, - { - g: supportBulk - ? { - type: 'MultiLineString', - coordinates: [] - } - : shareConn.info.hasMinVersion(10, 5, 2) && !process.env.MAXSCALE_TEST_DISABLE - ? 
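// Annotation: the three-way expectation above is the pattern used throughout these
// geometry batch tests. A condensed sketch, with `emptyGeometrySupported` standing in
// (illustratively) for the MariaDB >= 10.5.2 && !MAXSCALE_TEST_DISABLE check:
//   const expected = supportBulk
//     ? { type: 'MultiLineString', coordinates: [] } // bulk round-trips the empty list
//     : emptyGeometrySupported
//       ? { type: 'MultiLineString' }                // typed object without coordinates
//       : null;                                      // older servers decode to null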
{ type: 'MultiLineString' } - : null - } - ]); - } else { - assert.deepEqual(rows, [ - { - g: { + : shareConn.info.hasMinVersion(10, 5, 2) && !process.env.MAXSCALE_TEST_DISABLE + ? { type: 'MultiLineString' } + : null + }, + { + g: supportBulk + ? { type: 'MultiLineString', - coordinates: [ - [ - [10, 48], - [10, 21], - [10, 0] - ], - [ - [16, 0], - [16, 23], - [16, 48] - ] - ] + coordinates: [] } - }, - { - g: { + : shareConn.info.hasMinVersion(10, 5, 2) && !process.env.MAXSCALE_TEST_DISABLE + ? { type: 'MultiLineString' } + : null + }, + { + g: supportBulk + ? { type: 'MultiLineString', - coordinates: [ - [ - [10, 48], - [10, 21], - [10, 0] - ] - ] + coordinates: [] } - }, - { - g: null - }, - { - g: null - }, - { - g: null - } - ]); + : shareConn.info.hasMinVersion(10, 5, 2) && !process.env.MAXSCALE_TEST_DISABLE + ? { type: 'MultiLineString' } + : null + } + ]); + } else { + assert.deepEqual(rows, [ + { + g: { + type: 'MultiLineString', + coordinates: [ + [ + [10, 48], + [10, 21], + [10, 0] + ], + [ + [16, 0], + [16, 23], + [16, 48] + ] + ] + } + }, + { + g: { + type: 'MultiLineString', + coordinates: [ + [ + [10, 48], + [10, 21], + [10, 0] + ] + ] + } + }, + { + g: null + }, + { + g: null + }, + { + g: null } - done(); - }) - .catch(done); + ]); + } + shareConn.commit(); }); - it('Multi-polygon insert', function (done) { + it('Multi-polygon insert', async function () { if (!shareConn.info.isMariaDB()) this.skip(); - shareConn - .query('DROP TABLE IF EXISTS gis_multi_polygon_batch') - .then(() => { - return shareConn.query('CREATE TABLE gis_multi_polygon_batch (g MULTIPOLYGON)'); - }) - .then(() => { - return shareConn.query('FLUSH TABLES'); - }) - - .then(() => { - return shareConn.batch('INSERT INTO gis_multi_polygon_batch VALUES (?)', [ - [ - { - type: 'MultiPolygon', - coordinates: [ + await shareConn.query('DROP TABLE IF EXISTS gis_multi_polygon_batch'); + await shareConn.query('CREATE TABLE gis_multi_polygon_batch (g MULTIPOLYGON)'); + await shareConn.query('FLUSH TABLES'); + await shareConn.beginTransaction(); + await shareConn.batch('INSERT INTO gis_multi_polygon_batch VALUES (?)', [ + [ + { + type: 'MultiPolygon', + coordinates: [ + [ + [ + [28, 26], + [28, 0], + [84, 0], + [84, 42], + [28, 26] + ], + [ + [52, 18], + [66, 23], + [73, 9], + [48, 6], + [52, 18] + ] + ], + [ + [ + [59, 18], + [67, 18], + [67, 13], + [59, 13], + [59, 18] + ] + ] + ] + } + ], + [ + { + type: 'MultiPolygon', + coordinates: [ + [ + [ + [28, 26], + [28, 0], + [84, 0], + [84, 42], + [28, 26] + ], + [ + [52, 18], + [66, 23], + [73, 9], + [48, 6], + [52, 18] + ] + ] + ] + } + ], + [ + { + type: 'MultiPolygon', + coordinates: [[[]]] + } + ], + [ + { + type: 'MultiPolygon', + coordinates: [[]] + } + ], + [ + { + type: 'MultiPolygon', + coordinates: [] + } + ], + [ + { + type: 'MultiPolygon' + } + ] + ]); + const rows = await shareConn.query('SELECT * FROM gis_multi_polygon_batch'); + if (shareConn.info.isMariaDB() && shareConn.info.hasMinVersion(10, 2, 0)) { + assert.deepEqual(rows, [ + { + g: { + type: 'MultiPolygon', + coordinates: [ + [ [ - [ - [28, 26], - [28, 0], - [84, 0], - [84, 42], - [28, 26] - ], - [ - [52, 18], - [66, 23], - [73, 9], - [48, 6], - [52, 18] - ] + [28, 26], + [28, 0], + [84, 0], + [84, 42], + [28, 26] ], [ - [ - [59, 18], - [67, 18], - [67, 13], - [59, 13], - [59, 18] - ] + [52, 18], + [66, 23], + [73, 9], + [48, 6], + [52, 18] + ] + ], + [ + [ + [59, 18], + [67, 18], + [67, 13], + [59, 13], + [59, 18] ] ] - } - ], - [ - { - type: 'MultiPolygon', - coordinates: [ + ] + } + }, + { + 
g: { + type: 'MultiPolygon', + coordinates: [ + [ [ - [ - [28, 26], - [28, 0], - [84, 0], - [84, 42], - [28, 26] - ], - [ - [52, 18], - [66, 23], - [73, 9], - [48, 6], - [52, 18] - ] + [28, 26], + [28, 0], + [84, 0], + [84, 42], + [28, 26] + ], + [ + [52, 18], + [66, 23], + [73, 9], + [48, 6], + [52, 18] ] ] - } - ], - [ - { - type: 'MultiPolygon', - coordinates: [[[]]] - } - ], - [ - { - type: 'MultiPolygon', - coordinates: [[]] - } - ], - [ - { - type: 'MultiPolygon', - coordinates: [] - } - ], - [ - { - type: 'MultiPolygon' - } - ] - ]); - }) - .then(() => { - return shareConn.query('SELECT * FROM gis_multi_polygon_batch'); - }) - .then((rows) => { - if (shareConn.info.isMariaDB() && shareConn.info.hasMinVersion(10, 2, 0)) { - assert.deepEqual(rows, [ - { - g: { + ] + } + }, + { + g: supportBulk + ? { type: 'MultiPolygon', - coordinates: [ - [ - [ - [28, 26], - [28, 0], - [84, 0], - [84, 42], - [28, 26] - ], - [ - [52, 18], - [66, 23], - [73, 9], - [48, 6], - [52, 18] - ] - ], - [ - [ - [59, 18], - [67, 18], - [67, 13], - [59, 13], - [59, 18] - ] - ] - ] + coordinates: [[[]]] } - }, - { - g: { + : shareConn.info.hasMinVersion(10, 5, 2) && !process.env.MAXSCALE_TEST_DISABLE + ? { type: 'MultiPolygon' } + : null + }, + { + g: supportBulk + ? { type: 'MultiPolygon', - coordinates: [ - [ - [ - [28, 26], - [28, 0], - [84, 0], - [84, 42], - [28, 26] - ], - [ - [52, 18], - [66, 23], - [73, 9], - [48, 6], - [52, 18] - ] - ] - ] + coordinates: [[]] } - }, - { - g: supportBulk - ? { - type: 'MultiPolygon', - coordinates: [[[]]] - } - : shareConn.info.hasMinVersion(10, 5, 2) && !process.env.MAXSCALE_TEST_DISABLE - ? { type: 'MultiPolygon' } - : null - }, - { - g: supportBulk - ? { - type: 'MultiPolygon', - coordinates: [[]] - } - : shareConn.info.hasMinVersion(10, 5, 2) && !process.env.MAXSCALE_TEST_DISABLE - ? { type: 'MultiPolygon' } - : null - }, - { - g: supportBulk - ? { - type: 'MultiPolygon', - coordinates: [] - } - : shareConn.info.hasMinVersion(10, 5, 2) && !process.env.MAXSCALE_TEST_DISABLE - ? { type: 'MultiPolygon' } - : null - }, - { - g: supportBulk - ? { - type: 'MultiPolygon', - coordinates: [] - } - : shareConn.info.hasMinVersion(10, 5, 2) && !process.env.MAXSCALE_TEST_DISABLE - ? { type: 'MultiPolygon' } - : null - } - ]); - } else { - assert.deepEqual(rows, [ - { - g: { + : shareConn.info.hasMinVersion(10, 5, 2) && !process.env.MAXSCALE_TEST_DISABLE + ? { type: 'MultiPolygon' } + : null + }, + { + g: supportBulk + ? { type: 'MultiPolygon', - coordinates: [ - [ - [ - [28, 26], - [28, 0], - [84, 0], - [84, 42], - [28, 26] - ], - [ - [52, 18], - [66, 23], - [73, 9], - [48, 6], - [52, 18] - ] - ], - [ - [ - [59, 18], - [67, 18], - [67, 13], - [59, 13], - [59, 18] - ] - ] - ] + coordinates: [] } - }, - { - g: { + : shareConn.info.hasMinVersion(10, 5, 2) && !process.env.MAXSCALE_TEST_DISABLE + ? { type: 'MultiPolygon' } + : null + }, + { + g: supportBulk + ? { type: 'MultiPolygon', - coordinates: [ - [ - [ - [28, 26], - [28, 0], - [84, 0], - [84, 42], - [28, 26] - ], - [ - [52, 18], - [66, 23], - [73, 9], - [48, 6], - [52, 18] - ] - ] - ] + coordinates: [] } - }, - { - g: null - }, - { - g: null - }, - { - g: null - }, - { - g: null - } - ]); + : shareConn.info.hasMinVersion(10, 5, 2) && !process.env.MAXSCALE_TEST_DISABLE + ? 
{ type: 'MultiPolygon' } + : null + } + ]); + } else { + assert.deepEqual(rows, [ + { + g: { + type: 'MultiPolygon', + coordinates: [ + [ + [ + [28, 26], + [28, 0], + [84, 0], + [84, 42], + [28, 26] + ], + [ + [52, 18], + [66, 23], + [73, 9], + [48, 6], + [52, 18] + ] + ], + [ + [ + [59, 18], + [67, 18], + [67, 13], + [59, 13], + [59, 18] + ] + ] + ] + } + }, + { + g: { + type: 'MultiPolygon', + coordinates: [ + [ + [ + [28, 26], + [28, 0], + [84, 0], + [84, 42], + [28, 26] + ], + [ + [52, 18], + [66, 23], + [73, 9], + [48, 6], + [52, 18] + ] + ] + ] + } + }, + { + g: null + }, + { + g: null + }, + { + g: null + }, + { + g: null } - done(); - }) - .catch(done); + ]); + } + shareConn.commit(); }); it('Geometry collection insert', async function () { @@ -957,7 +886,8 @@ describe('batch geometry type', () => { const conn = await base.createConnection(); conn.query('DROP TABLE IF EXISTS gis_geometrycollection_batch'); conn.query('CREATE TABLE gis_geometrycollection_batch (g GEOMETRYCOLLECTION)'); - await shareConn.query('FLUSH TABLES'); + await conn.query('FLUSH TABLES'); + await conn.beginTransaction(); await conn.batch('INSERT INTO gis_geometrycollection_batch VALUES (?)', [ [ { diff --git a/test/integration/test-big-query.js b/test/integration/test-big-query.js index a65d6aba..e65a3e2c 100644 --- a/test/integration/test-big-query.js +++ b/test/integration/test-big-query.js @@ -29,9 +29,11 @@ describe('Big query', function () { shareConn.query('DROP TABLE IF EXISTS bigParameterBigParam'); shareConn.query('CREATE TABLE bigParameterBigParam (b longblob)'); await shareConn.query('FLUSH TABLES'); + shareConn.beginTransaction(); shareConn.query('insert into bigParameterBigParam(b) values(?)', [buf]); const rows = await shareConn.query('SELECT * from bigParameterBigParam'); assert.deepEqual(rows[0].b, buf); + shareConn.commit(); }); it('int8 buffer overflow', async function () { @@ -40,6 +42,7 @@ describe('Big query', function () { conn.query('DROP TABLE IF EXISTS bigParameterInt8'); conn.query('CREATE TABLE bigParameterInt8 (a varchar(1024), b varchar(10))'); await conn.query('FLUSH TABLE'); + await conn.beginTransaction(); await conn.query('insert into bigParameterInt8 values(?, ?)', [buf.toString(), 'test']); const rows = await conn.query('SELECT * from bigParameterInt8'); assert.deepEqual(rows[0].a, buf.toString()); @@ -76,7 +79,8 @@ describe('Big query', function () { sqlInsert += ')'; conn.query('DROP TABLE IF EXISTS bigParameter'); conn.query(sql); - await shareConn.query('FLUSH TABLES'); + await conn.query('FLUSH TABLES'); + conn.beginTransaction(); await conn.beginTransaction(); conn.query(sqlInsert, params); const rows = await conn.query('SELECT * from bigParameter'); diff --git a/test/integration/test-connection-opts.js b/test/integration/test-connection-opts.js index c68daeb4..c88d24ee 100644 --- a/test/integration/test-connection-opts.js +++ b/test/integration/test-connection-opts.js @@ -263,240 +263,112 @@ describe('connection option', () => { conn.end(); }); - it('nestTables results boolean', function (done) { - base - .createConnection({ nestTables: true }) - .then((conn) => { - conn - .query('DROP TABLE IF EXISTS t1') - .then(() => { - return conn.query('DROP TABLE IF EXISTS t2'); - }) - .then(() => { - return conn.query('CREATE TABLE t1 (a varchar(20))'); - }) - .then(() => { - return conn.query('CREATE TABLE t2 (b varchar(20))'); - }) - .then(() => { - return conn.query("INSERT INTO t1 VALUES ('bla'), ('bla2')"); - }) - .then(() => { - return conn.query("INSERT INTO t2 VALUES 
('bou')"); - }) - .then(() => { - return conn.query('SELECT * FROM t1, t2'); - }) - .then((rows) => { - assert.deepEqual(rows, [ - { t1: { a: 'bla' }, t2: { b: 'bou' } }, - { t1: { a: 'bla2' }, t2: { b: 'bou' } } - ]); - conn.end(); - done(); - }) - .catch((err) => { - conn.end(); - done(err); - }); - }) - .catch(done); + it('nestTables results boolean', async function () { + const conn = await base.createConnection({ nestTables: true }); + await conn.query('DROP TABLE IF EXISTS t1'); + await conn.query('DROP TABLE IF EXISTS t2'); + await conn.query('CREATE TABLE t1 (a varchar(20))'); + await conn.query('CREATE TABLE t2 (b varchar(20))'); + await conn.query('FLUSH TABLES'); + await conn.beginTransaction(); + await conn.query("INSERT INTO t1 VALUES ('bla'), ('bla2')"); + await conn.query("INSERT INTO t2 VALUES ('bou')"); + const rows = await conn.query('SELECT * FROM t1, t2'); + assert.deepEqual(rows, [ + { t1: { a: 'bla' }, t2: { b: 'bou' } }, + { t1: { a: 'bla2' }, t2: { b: 'bou' } } + ]); + conn.end(); }); - it('nestTables results string', function (done) { - base - .createConnection({ nestTables: '_' }) - .then((conn) => { - conn - .query('DROP TABLE IF EXISTS t1') - .then(() => { - return conn.query('DROP TABLE IF EXISTS t2'); - }) - .then(() => { - return conn.query('CREATE TABLE t1 (a varchar(20))'); - }) - .then(() => { - return conn.query('CREATE TABLE t2 (b varchar(20))'); - }) - .then(() => { - return conn.query("INSERT INTO t1 VALUES ('bla'), ('bla2')"); - }) - .then(() => { - return conn.query("INSERT INTO t2 VALUES ('bou')"); - }) - .then(() => { - return conn.query('SELECT * FROM t1, t2'); - }) - .then((rows) => { - assert.deepEqual(rows, [ - { t1_a: 'bla', t2_b: 'bou' }, - { t1_a: 'bla2', t2_b: 'bou' } - ]); - conn.end(); - done(); - }) - .catch((err) => { - conn.end(); - done(err); - }); - }) - .catch(done); + it('nestTables results string', async function () { + const conn = await base.createConnection({ nestTables: '_' }); + await conn.query('DROP TABLE IF EXISTS t1'); + await conn.query('DROP TABLE IF EXISTS t2'); + await conn.query('CREATE TABLE t1 (a varchar(20))'); + await conn.query('CREATE TABLE t2 (b varchar(20))'); + await conn.query('FLUSH TABLES'); + await conn.beginTransaction(); + await conn.query("INSERT INTO t1 VALUES ('bla'), ('bla2')"); + await conn.query("INSERT INTO t2 VALUES ('bou')"); + const rows = await conn.query('SELECT * FROM t1, t2'); + assert.deepEqual(rows, [ + { t1_a: 'bla', t2_b: 'bou' }, + { t1_a: 'bla2', t2_b: 'bou' } + ]); + conn.end(); }); - it('rows as array', function (done) { - base - .createConnection({ rowsAsArray: true }) - .then((conn) => { - conn - .query('DROP TABLE IF EXISTS t1') - .then(() => { - return conn.query('DROP TABLE IF EXISTS t2'); - }) - .then(() => { - return conn.query('CREATE TABLE t1 (a varchar(20))'); - }) - .then(() => { - return conn.query('CREATE TABLE t2 (b varchar(20))'); - }) - .then(() => { - return conn.query("INSERT INTO t1 VALUES ('bla'), ('bla2')"); - }) - .then(() => { - return conn.query("INSERT INTO t2 VALUES ('bou')"); - }) - .then(() => { - return conn.query('SELECT * FROM t1, t2'); - }) - .then((rows) => { - assert.deepEqual(rows, [ - ['bla', 'bou'], - ['bla2', 'bou'] - ]); - return conn.end(); - }) - .then(() => { - done(); - }) - .catch(done); - }) - .catch(done); + it('rows as array', async function () { + const conn = await base.createConnection({ rowsAsArray: true }); + await conn.query('DROP TABLE IF EXISTS t1'); + await conn.query('DROP TABLE IF EXISTS t2'); + await conn.query('CREATE 
TABLE t1 (a varchar(20))'); + await conn.query('CREATE TABLE t2 (b varchar(20))'); + await conn.query('FLUSH TABLES'); + await conn.beginTransaction(); + await conn.query("INSERT INTO t1 VALUES ('bla'), ('bla2')"); + await conn.query("INSERT INTO t2 VALUES ('bou')"); + const rows = await conn.query('SELECT * FROM t1, t2'); + assert.deepEqual(rows, [ + ['bla', 'bou'], + ['bla2', 'bou'] + ]); + conn.end(); }); - it('query option rows as array', function (done) { - base - .createConnection() - .then((conn) => { - conn - .query('DROP TABLE IF EXISTS t1') - .then(() => { - return conn.query('DROP TABLE IF EXISTS t2'); - }) - .then(() => { - return conn.query('CREATE TABLE t1 (a varchar(20))'); - }) - .then(() => { - return conn.query('CREATE TABLE t2 (b varchar(20))'); - }) - .then(() => { - return conn.query("INSERT INTO t1 VALUES ('bla'), ('bla2')"); - }) - .then(() => { - return conn.query("INSERT INTO t2 VALUES ('bou')"); - }) - .then(() => { - return conn.query({ rowsAsArray: true, sql: 'SELECT * FROM t1, t2' }); - }) - .then((rows) => { - assert.deepEqual(rows, [ - ['bla', 'bou'], - ['bla2', 'bou'] - ]); - return conn.end(); - }) - .then(() => { - done(); - }) - .catch(done); - }) - .catch(done); + it('query option rows as array', async function () { + const conn = await base.createConnection(); + await conn.query('DROP TABLE IF EXISTS t1'); + await conn.query('DROP TABLE IF EXISTS t2'); + await conn.query('CREATE TABLE t1 (a varchar(20))'); + await conn.query('CREATE TABLE t2 (b varchar(20))'); + await conn.query('FLUSH TABLES'); + await conn.beginTransaction(); + await conn.query("INSERT INTO t1 VALUES ('bla'), ('bla2')"); + await conn.query("INSERT INTO t2 VALUES ('bou')"); + const rows = await conn.query({ rowsAsArray: true, sql: 'SELECT * FROM t1, t2' }); + assert.deepEqual(rows, [ + ['bla', 'bou'], + ['bla2', 'bou'] + ]); + conn.end(); }); - it('nestTables results query boolean', function (done) { - base - .createConnection() - .then((conn) => { - conn - .query('DROP TABLE IF EXISTS t1') - .then(() => { - return conn.query('DROP TABLE IF EXISTS t2'); - }) - .then(() => { - return conn.query('CREATE TABLE t1 (a varchar(20))'); - }) - .then(() => { - return conn.query('CREATE TABLE t2 (b varchar(20))'); - }) - .then(() => { - return conn.query("INSERT INTO t1 VALUES ('bla'), ('bla2')"); - }) - .then(() => { - return conn.query("INSERT INTO t2 VALUES ('bou')"); - }) - .then(() => { - return conn.query({ nestTables: true, sql: 'SELECT * FROM t1, t2' }); - }) - .then((rows) => { - assert.deepEqual(rows, [ - { t1: { a: 'bla' }, t2: { b: 'bou' } }, - { t1: { a: 'bla2' }, t2: { b: 'bou' } } - ]); - return conn.end(); - }) - .then(() => { - done(); - }) - .catch(done); - }) - .catch(done); + it('nestTables results query boolean', async function () { + const conn = await base.createConnection(); + await conn.query('DROP TABLE IF EXISTS t1'); + await conn.query('DROP TABLE IF EXISTS t2'); + await conn.query('CREATE TABLE t1 (a varchar(20))'); + await conn.query('CREATE TABLE t2 (b varchar(20))'); + await conn.query('FLUSH TABLES'); + await conn.beginTransaction(); + await conn.query("INSERT INTO t1 VALUES ('bla'), ('bla2')"); + await conn.query("INSERT INTO t2 VALUES ('bou')"); + const rows = await conn.query({ nestTables: true, sql: 'SELECT * FROM t1, t2' }); + assert.deepEqual(rows, [ + { t1: { a: 'bla' }, t2: { b: 'bou' } }, + { t1: { a: 'bla2' }, t2: { b: 'bou' } } + ]); + conn.end(); }); - it('nestTables results query string', function (done) { - base - .createConnection() - .then((conn) 
=> { - conn - .query('DROP TABLE IF EXISTS t1') - .then(() => { - return conn.query('DROP TABLE IF EXISTS t2'); - }) - .then(() => { - return conn.query('CREATE TABLE t1 (a varchar(20))'); - }) - .then(() => { - return conn.query('CREATE TABLE t2 (b varchar(20))'); - }) - .then(() => { - return conn.query("INSERT INTO t1 VALUES ('bla'), ('bla2')"); - }) - .then(() => { - return conn.query("INSERT INTO t2 VALUES ('bou')"); - }) - .then(() => { - return conn.query({ nestTables: '_', sql: 'SELECT * FROM t1, t2' }); - }) - .then((rows) => { - assert.deepEqual(rows, [ - { t1_a: 'bla', t2_b: 'bou' }, - { t1_a: 'bla2', t2_b: 'bou' } - ]); - return conn.end(); - }) - .then(() => { - done(); - }) - .catch(done); - }) - .catch(done); + it('nestTables results query string', async function () { + const conn = await base.createConnection(); + await conn.query('DROP TABLE IF EXISTS t1'); + await conn.query('DROP TABLE IF EXISTS t2'); + await conn.query('CREATE TABLE t1 (a varchar(20))'); + await conn.query('CREATE TABLE t2 (b varchar(20))'); + await conn.query('FLUSH TABLES'); + await conn.beginTransaction(); + await conn.query("INSERT INTO t1 VALUES ('bla'), ('bla2')"); + await conn.query("INSERT INTO t2 VALUES ('bou')"); + const rows = await conn.query({ nestTables: '_', sql: 'SELECT * FROM t1, t2' }); + assert.deepEqual(rows, [ + { t1_a: 'bla', t2_b: 'bou' }, + { t1_a: 'bla2', t2_b: 'bou' } + ]); + await conn.end(); }); it('force version check', function (done) { diff --git a/test/integration/test-local-infile.js b/test/integration/test-local-infile.js index 9a1d36c9..2c2660f6 100644 --- a/test/integration/test-local-infile.js +++ b/test/integration/test-local-infile.js @@ -160,102 +160,62 @@ describe('local-infile', () => { .catch(done); }); - it('small local infile', function (done) { + it('small local infile', async function () { const self = this; - shareConn - .query('select @@local_infile') - .then((rows) => { - if (rows[0]['@@local_infile'] === 0) { - self.skip(); - } - return new Promise(function (resolve, reject) { - fs.writeFile(smallFileName, '1,hello\n2,world\n', 'utf8', function (err) { - if (err) reject(err); - else resolve(); - }); - }); - }) - .then(() => { - base - .createConnection({ permitLocalInfile: true }) - .then((conn) => { - shareConn - .query('DROP TABLE IF EXISTS smallLocalInfile') - .then(() => { - return conn.query('CREATE TABLE smallLocalInfile(id int, test varchar(100))'); - }) - .then(() => { - return conn.query( - "LOAD DATA LOCAL INFILE '" + - smallFileName.replace(/\\/g, '/') + - "' INTO TABLE smallLocalInfile FIELDS TERMINATED BY ',' (id, test)" - ); - }) - .then(() => { - return conn.query('SELECT * FROM smallLocalInfile'); - }) - .then((rows) => { - assert.deepEqual(rows, [ - { id: 1, test: 'hello' }, - { id: 2, test: 'world' } - ]); - conn.end(); - done(); - }) - .catch(done); - }) - .catch(done); - }) - .catch(done); + const rows = await shareConn.query('select @@local_infile'); + if (rows[0]['@@local_infile'] === 0) { + return self.skip(); + } + await new Promise(function (resolve, reject) { + fs.writeFile(smallFileName, '1,hello\n2,world\n', 'utf8', function (err) { + if (err) reject(err); + else resolve(); + }); + }); + const conn = await base.createConnection({ permitLocalInfile: true }); + await conn.query('DROP TABLE IF EXISTS smallLocalInfile'); + await conn.query('CREATE TABLE smallLocalInfile(id int, test varchar(100))'); + await conn.beginTransaction(); + await conn.query( + "LOAD DATA LOCAL INFILE '" + + smallFileName.replace(/\\/g, '/') + + "' INTO 
TABLE smallLocalInfile FIELDS TERMINATED BY ',' (id, test)" + ); + const rows2 = await conn.query('SELECT * FROM smallLocalInfile'); + assert.deepEqual(rows2, [ + { id: 1, test: 'hello' }, + { id: 2, test: 'world' } + ]); + conn.end(); }); - it('small local infile with non supported node.js encoding', function (done) { + it('small local infile with non supported node.js encoding', async function () { const self = this; - shareConn - .query('select @@local_infile') - .then((rows) => { - if (rows[0]['@@local_infile'] === 0) { - self.skip(); - } - return new Promise(function (resolve, reject) { - fs.writeFile(smallFileName, '1,hello\n2,world\n', 'utf8', function (err) { - if (err) reject(err); - else resolve(); - }); - }); - }) - .then(() => { - base - .createConnection({ permitLocalInfile: true, charset: 'big5' }) - .then((conn) => { - shareConn - .query('DROP TABLE IF EXISTS smallLocalInfile') - .then(() => { - return conn.query('CREATE TABLE smallLocalInfile(id int, test varchar(100))'); - }) - .then(() => { - return conn.query( - "LOAD DATA LOCAL INFILE '" + - smallFileName.replace(/\\/g, '/') + - "' INTO TABLE smallLocalInfile FIELDS TERMINATED BY ',' (id, test)" - ); - }) - .then(() => { - return conn.query('SELECT * FROM smallLocalInfile'); - }) - .then((rows) => { - assert.deepEqual(rows, [ - { id: 1, test: 'hello' }, - { id: 2, test: 'world' } - ]); - conn.end(); - done(); - }) - .catch(done); - }) - .catch(done); - }) - .catch(done); + const rows = await shareConn.query('select @@local_infile'); + if (rows[0]['@@local_infile'] === 0) { + return self.skip(); + } + await new Promise(function (resolve, reject) { + fs.writeFile(smallFileName, '1,hello\n2,world\n', 'utf8', function (err) { + if (err) reject(err); + else resolve(); + }); + }); + const conn = await base.createConnection({ permitLocalInfile: true, charset: 'big5' }); + await conn.query('DROP TABLE IF EXISTS smallLocalInfile'); + await conn.query('CREATE TABLE smallLocalInfile(id int, test varchar(100))'); + await conn.beginTransaction(); + await conn.query( + "LOAD DATA LOCAL INFILE '" + + smallFileName.replace(/\\/g, '/') + + "' INTO TABLE smallLocalInfile FIELDS TERMINATED BY ',' (id, test)" + ); + const rows2 = await conn.query('SELECT * FROM smallLocalInfile'); + assert.deepEqual(rows2, [ + { id: 1, test: 'hello' }, + { id: 2, test: 'world' } + ]); + conn.end(); }); it('non readable local infile', function (done) { @@ -310,81 +270,59 @@ describe('local-infile', () => { .catch(done); }); - it('big local infile', function (done) { + it('big local infile', async function () { this.timeout(180000); let size; const self = this; - shareConn - .query('select @@local_infile') - .then((rows) => { - if (rows[0]['@@local_infile'] === 0) { - self.skip(); - } - return shareConn.query('SELECT @@max_allowed_packet as t'); - }) - .then((rows) => { - const maxAllowedSize = rows[0].t; - size = Math.round((maxAllowedSize - 100) / 16); - const header = '"a","b"\n'; - const headerLen = header.length; - const buf = Buffer.allocUnsafe(size * 16 + headerLen); - buf.write(header); - for (let i = 0; i < size; i++) { - buf.write('"a' + padStartZero(i, 8) + '","b"\n', i * 16 + headerLen); - } - return new Promise(function (resolve, reject) { - fs.writeFile(bigFileName, buf, function (err) { - if (err) reject(err); - else resolve(); - }); - }); - }) - .then(() => { - base - .createConnection({ permitLocalInfile: true }) - .then((conn) => { - shareConn - .query('DROP TABLE IF EXISTS bigLocalInfile') - .then(() => { - return conn.query('CREATE TABLE 
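// Annotation: as the `select @@local_infile` skip guard above shows, LOAD DATA LOCAL
// INFILE needs both sides to opt in: permitLocalInfile: true on the connection and
// local_infile enabled on the server. Minimal sketch (file and table names are the
// ones this test already uses):
//   const conn = await base.createConnection({ permitLocalInfile: true });
//   await conn.query(
//     "LOAD DATA LOCAL INFILE '" + smallFileName.replace(/\\/g, '/') +
//       "' INTO TABLE smallLocalInfile FIELDS TERMINATED BY ',' (id, test)"
//   );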
bigLocalInfile(t1 varchar(10), t2 varchar(2))'); - }) - .then(() => { - return conn.query( - "LOAD DATA LOCAL INFILE '" + - bigFileName.replace(/\\/g, '/') + - "' INTO TABLE bigLocalInfile " + - "COLUMNS TERMINATED BY ',' ENCLOSED BY '\\\"' ESCAPED BY '\\\\' " + - "LINES TERMINATED BY '\\n' IGNORE 1 LINES " + - '(t1, t2)' - ); - }) - .then(() => { - return conn.query('SELECT * FROM bigLocalInfile'); - }) - .then((rows) => { - assert.equal(rows.length, size); - for (let i = 0; i < size; i++) { - if (rows[i].t1 !== 'a' + padStartZero(i, 8) && rows[i].t2 !== 'b') { - console.log( - 'result differ (no:' + - i + - ') t1=' + - rows[i].t1 + - ' != ' + - padStartZero(i, 8) + - ' t2=' + - rows[i].t2 - ); - } - } - conn.end(); - done(); - }) - .catch(done); - }) - .catch(done); - }) - .catch(done); + let rows = await shareConn.query('select @@local_infile'); + if (rows[0]['@@local_infile'] === 0) { + return self.skip(); + } + rows = await shareConn.query('SELECT @@max_allowed_packet as t'); + const maxAllowedSize = rows[0].t; + size = Math.round((maxAllowedSize - 100) / 16); + const header = '"a","b"\n'; + const headerLen = header.length; + const buf = Buffer.allocUnsafe(size * 16 + headerLen); + buf.write(header); + for (let i = 0; i < size; i++) { + buf.write('"a' + padStartZero(i, 8) + '","b"\n', i * 16 + headerLen); + } + await new Promise(function (resolve, reject) { + fs.writeFile(bigFileName, buf, function (err) { + if (err) reject(err); + else resolve(); + }); + }); + const conn = await base.createConnection({ permitLocalInfile: true }); + await conn.query('DROP TABLE IF EXISTS bigLocalInfile'); + await conn.query('CREATE TABLE bigLocalInfile(t1 varchar(10), t2 varchar(2))'); + await conn.beginTransaction(); + await conn.query( + "LOAD DATA LOCAL INFILE '" + + bigFileName.replace(/\\/g, '/') + + "' INTO TABLE bigLocalInfile " + + "COLUMNS TERMINATED BY ',' ENCLOSED BY '\\\"' ESCAPED BY '\\\\' " + + "LINES TERMINATED BY '\\n' IGNORE 1 LINES " + + '(t1, t2)' + ); + rows = await conn.query('SELECT * FROM bigLocalInfile'); + assert.equal(rows.length, size); + for (let i = 0; i < size; i++) { + if (rows[i].t1 !== 'a' + padStartZero(i, 8) && rows[i].t2 !== 'b') { + console.log( + 'result differ (no:' + + i + + ') t1=' + + rows[i].t1 + + ' != ' + + padStartZero(i, 8) + + ' t2=' + + rows[i].t2 + ); + } + } + conn.end(); }); function padStartZero(val, length) { diff --git a/test/integration/test-metadata.js b/test/integration/test-metadata.js index d942ca9e..d9dbca44 100644 --- a/test/integration/test-metadata.js +++ b/test/integration/test-metadata.js @@ -6,72 +6,63 @@ const Collations = require('../../lib/const/collations.js'); const FieldType = require('../../lib/const/field-type'); describe('metadata', () => { - it('result metadata values', function (done) { - shareConn - .query('DROP TABLE IF EXISTS metadatatable') - .then(() => { - return shareConn.query( - 'CREATE TABLE metadatatable (id BIGINT not null primary key auto_increment, ' + - 't varchar(32) UNIQUE, ' + - 'd DECIMAL(10,4) UNSIGNED ZEROFILL, ' + - 'ds DECIMAL(10,4) SIGNED, ' + - 'd2 DECIMAL(10,0) UNSIGNED, ' + - 'ds2 DECIMAL(10,0) SIGNED ' + - ") COLLATE='utf8mb4_unicode_ci'" - ); - }) - .then(() => { - return shareConn.query( - 'SELECT id as id1, t as t1, d as d1, ds as d2, d2 as d3, ds2 as d4 FROM metadatatable as tm' - ); - }) - .then((rows) => { - assert.equal(rows.meta.length, 6); + it('result metadata values', async function () { + await shareConn.query('DROP TABLE IF EXISTS metadatatable'); + await shareConn.query( + 'CREATE 
TABLE metadatatable (id BIGINT not null primary key auto_increment, ' + + 't varchar(32) UNIQUE, ' + + 'd DECIMAL(10,4) UNSIGNED ZEROFILL, ' + + 'ds DECIMAL(10,4) SIGNED, ' + + 'd2 DECIMAL(10,0) UNSIGNED, ' + + 'ds2 DECIMAL(10,0) SIGNED ' + + ") COLLATE='utf8mb4_unicode_ci'" + ); + await shareConn.query('FLUSH TABLES'); + const rows = await shareConn.query( + 'SELECT id as id1, t as t1, d as d1, ds as d2, d2 as d3, ds2 as d4 FROM metadatatable as tm' + ); + assert.equal(rows.meta.length, 6); - assert.equal(rows.meta[0].db(), 'testn'); - assert.equal(rows.meta[0].schema(), 'testn'); - assert.equal(rows.meta[0].table(), 'tm'); - assert.equal(rows.meta[0].orgTable(), 'metadatatable'); - assert.equal(rows.meta[0].name(), 'id1'); - assert.equal(rows.meta[0].orgName(), 'id'); - assert.equal(rows.meta[0].collation, Collations.fromName('BINARY')); - assert.equal(rows.meta[0].columnLength, 20); - assert.equal(rows.meta[0].columnType, FieldType.LONGLONG); + assert.equal(rows.meta[0].db(), 'testn'); + assert.equal(rows.meta[0].schema(), 'testn'); + assert.equal(rows.meta[0].table(), 'tm'); + assert.equal(rows.meta[0].orgTable(), 'metadatatable'); + assert.equal(rows.meta[0].name(), 'id1'); + assert.equal(rows.meta[0].orgName(), 'id'); + assert.equal(rows.meta[0].collation, Collations.fromName('BINARY')); + assert.equal(rows.meta[0].columnLength, 20); + assert.equal(rows.meta[0].columnType, FieldType.LONGLONG); - assert.equal(rows.meta[1].db(), 'testn'); - assert.equal(rows.meta[1].schema(), 'testn'); - assert.equal(rows.meta[1].table(), 'tm'); - assert.equal(rows.meta[1].orgTable(), 'metadatatable'); - assert.equal(rows.meta[1].name(), 't1'); - assert.equal(rows.meta[1].orgName(), 't'); - if (base.utf8Collation()) { - assert.equal(rows.meta[1].collation, Collations.fromName('UTF8MB4_UNICODE_CI')); - assert.equal(rows.meta[1].columnLength, 128); - } - assert.equal(rows.meta[1].columnType, FieldType.VAR_STRING); + assert.equal(rows.meta[1].db(), 'testn'); + assert.equal(rows.meta[1].schema(), 'testn'); + assert.equal(rows.meta[1].table(), 'tm'); + assert.equal(rows.meta[1].orgTable(), 'metadatatable'); + assert.equal(rows.meta[1].name(), 't1'); + assert.equal(rows.meta[1].orgName(), 't'); + if (base.utf8Collation()) { + assert.equal(rows.meta[1].collation, Collations.fromName('UTF8MB4_UNICODE_CI')); + assert.equal(rows.meta[1].columnLength, 128); + } + assert.equal(rows.meta[1].columnType, FieldType.VAR_STRING); - assert.equal(rows.meta[2].db(), 'testn'); - assert.equal(rows.meta[2].schema(), 'testn'); - assert.equal(rows.meta[2].table(), 'tm'); - assert.equal(rows.meta[2].orgTable(), 'metadatatable'); - assert.equal(rows.meta[2].name(), 'd1'); - assert.equal(rows.meta[2].orgName(), 'd'); - assert.equal(rows.meta[2].collation, Collations.fromName('BINARY')); - assert.equal(rows.meta[2].columnLength, 11); - assert.equal(rows.meta[2].columnType, FieldType.NEWDECIMAL); + assert.equal(rows.meta[2].db(), 'testn'); + assert.equal(rows.meta[2].schema(), 'testn'); + assert.equal(rows.meta[2].table(), 'tm'); + assert.equal(rows.meta[2].orgTable(), 'metadatatable'); + assert.equal(rows.meta[2].name(), 'd1'); + assert.equal(rows.meta[2].orgName(), 'd'); + assert.equal(rows.meta[2].collation, Collations.fromName('BINARY')); + assert.equal(rows.meta[2].columnLength, 11); + assert.equal(rows.meta[2].columnType, FieldType.NEWDECIMAL); - assert.equal(rows.meta[3].db(), 'testn'); - assert.equal(rows.meta[3].schema(), 'testn'); - assert.equal(rows.meta[3].table(), 'tm'); - assert.equal(rows.meta[3].orgTable(), 
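// Annotation: every rows.meta entry exposes the accessors checked in this test --
// db()/schema(), table() vs orgTable() (result alias vs real table), name() vs
// orgName() (column alias vs real column), plus the collation, columnLength and
// columnType fields. For instance, meta[0] reports name() 'id1' but orgName() 'id'.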
'metadatatable'); - assert.equal(rows.meta[3].name(), 'd2'); - assert.equal(rows.meta[3].orgName(), 'ds'); - assert.equal(rows.meta[3].collation, Collations.fromName('BINARY')); - assert.equal(rows.meta[3].columnLength, 12); - assert.equal(rows.meta[3].columnType, FieldType.NEWDECIMAL); - - done(); - }) - .catch(done); + assert.equal(rows.meta[3].db(), 'testn'); + assert.equal(rows.meta[3].schema(), 'testn'); + assert.equal(rows.meta[3].table(), 'tm'); + assert.equal(rows.meta[3].orgTable(), 'metadatatable'); + assert.equal(rows.meta[3].name(), 'd2'); + assert.equal(rows.meta[3].orgName(), 'ds'); + assert.equal(rows.meta[3].collation, Collations.fromName('BINARY')); + assert.equal(rows.meta[3].columnLength, 12); + assert.equal(rows.meta[3].columnType, FieldType.NEWDECIMAL); }); }); diff --git a/test/integration/test-multi-results.js b/test/integration/test-multi-results.js index 1755afc8..bbe58fb0 100644 --- a/test/integration/test-multi-results.js +++ b/test/integration/test-multi-results.js @@ -38,169 +38,80 @@ describe('multi-results', () => { .catch(done); }); - it('duplicate column', function (done) { - base - .createConnection() - .then((conn) => { - shareConn - .query('DROP TABLE IF EXISTS t') - .then(() => { - return conn.query('CREATE TABLE t (i int)'); - }) - .then(() => { - return conn.query('INSERT INTO t(i) VALUES (1)'); - }) - .then(() => { - return conn.query({ rowsAsArray: true, sql: 'SELECT i, i FROM t' }); - }) - .then((res) => { - conn - .query('SELECT i, i FROM t') - .then((res) => { - conn.end(); - done(new Error('must have thrown an error')); - }) - .catch((err) => { - assert.isTrue(err.message.includes('Error in results, duplicate field name `i`')); - assert.equal(err.errno, 45040); - assert.equal(err.sqlState, 42000); - assert.equal(err.code, 'ER_DUPLICATE_FIELD'); - conn - .rollback() - .then(() => { - conn.end(); - done(); - }) - .catch((err) => { - conn.end(); - done(err); - }); - }); - }) - .catch((err) => { - conn.end(); - done(err); - }); - }) - .catch(done); + it('duplicate column', async function () { + const conn = await base.createConnection(); + await conn.query('DROP TABLE IF EXISTS t'); + await conn.query('CREATE TABLE t (i int)'); + await conn.beginTransaction(); + await conn.query('INSERT INTO t(i) VALUES (1)'); + await conn.query({ rowsAsArray: true, sql: 'SELECT i, i FROM t' }); + try { + await conn.query('SELECT i, i FROM t'); + throw new Error('must have thrown an error'); + } catch (err) { + assert.isTrue(err.message.includes('Error in results, duplicate field name `i`')); + assert.equal(err.errno, 45040); + assert.equal(err.sqlState, 42000); + assert.equal(err.code, 'ER_DUPLICATE_FIELD'); + conn.rollback(); + conn.end(); + } }); - it('duplicate column disabled', function (done) { - base - .createConnection({ checkDuplicate: false }) - .then((conn) => { - shareConn - .query('DROP TABLE IF EXISTS t') - .then(() => { - return conn.query('CREATE TABLE t (i int)'); - }) - .then(() => { - return conn.query('INSERT INTO t(i) VALUES (1)'); - }) - .then(() => { - return conn.query({ rowsAsArray: true, sql: 'SELECT i, i FROM t' }); - }) - .then((res) => { - conn - .query('SELECT i, i FROM t') - .then((res) => { - assert.deepEqual(res, [ - { - i: 1 - } - ]); - conn.end(); - done(); - }) - .catch(done); - }) - .catch((err) => { - conn.end(); - done(err); - }); - }) - .catch(done); + it('duplicate column disabled', async function () { + const conn = await base.createConnection({ checkDuplicate: false }); + await conn.query('DROP TABLE IF EXISTS t'); + await 
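// Annotation: the duplicate-column tests below all exercise the same guard. By default
// a result set with colliding column labels is rejected with errno 45040 /
// ER_DUPLICATE_FIELD; with checkDuplicate: false the query succeeds and, as the
// assertions show, a single property survives per duplicated name.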
conn.query('CREATE TABLE t (i int)'); + await conn.beginTransaction(); + await conn.query('INSERT INTO t(i) VALUES (1)'); + await conn.query({ rowsAsArray: true, sql: 'SELECT i, i FROM t' }); + const res = await conn.query('SELECT i, i FROM t'); + assert.deepEqual(res, [ + { + i: 1 + } + ]); + conn.end(); }); - it('duplicate column nestTables', function (done) { - base - .createConnection({ nestTables: true }) - .then((conn) => { - shareConn - .query('DROP TABLE IF EXISTS t') - .then(() => { - return conn.query('CREATE TABLE t (i int)'); - }) - .then(() => { - return conn.query('INSERT INTO t(i) VALUES (1)'); - }) - .then(() => { - return conn.query({ rowsAsArray: true, sql: 'SELECT i, i FROM t' }); - }) - .then((res) => { - conn - .query('SELECT i, i FROM t') - .then((res) => { - conn.end(); - done(new Error('must have thrown an error')); - }) - .catch((err) => { - assert.isTrue( - err.message.includes('Error in results, duplicate field name `t`.`i`') - ); - assert.equal(err.errno, 45040); - assert.equal(err.sqlState, 42000); - assert.equal(err.code, 'ER_DUPLICATE_FIELD'); - conn.end(); - done(); - }); - }) - .catch((err) => { - conn.end(); - done(err); - }); - }) - .catch(done); + it('duplicate column nestTables', async function () { + const conn = await base.createConnection({ nestTables: true }); + await conn.query('DROP TABLE IF EXISTS t'); + await conn.query('CREATE TABLE t (i int)'); + await conn.beginTransaction(); + + await conn.query('INSERT INTO t(i) VALUES (1)'); + await conn.query({ rowsAsArray: true, sql: 'SELECT i, i FROM t' }); + try { + await conn.query('SELECT i, i FROM t'); + conn.end(); + throw new Error('must have thrown an error'); + } catch (err) { + assert.isTrue(err.message.includes('Error in results, duplicate field name `t`.`i`')); + assert.equal(err.errno, 45040); + assert.equal(err.sqlState, 42000); + assert.equal(err.code, 'ER_DUPLICATE_FIELD'); + conn.end(); + } }); - it('duplicate column disabled nestTables', function (done) { - base - .createConnection({ checkDuplicate: false, nestTables: true }) - .then((conn) => { - shareConn - .query('DROP TABLE IF EXISTS t') - .then(() => { - return conn.query('CREATE TABLE t (i int)'); - }) - .then(() => { - return conn.query('INSERT INTO t(i) VALUES (1)'); - }) - .then(() => { - return conn.query({ rowsAsArray: true, sql: 'SELECT i, i FROM t' }); - }) + it('duplicate column disabled nestTables', async function () { + const conn = await base.createConnection({ checkDuplicate: false, nestTables: true }); + await conn.query('DROP TABLE IF EXISTS t'); + await conn.query('CREATE TABLE t (i int)'); + await conn.beginTransaction(); - .then((res) => { - conn - .query('SELECT i, i FROM t') - .then((res) => { - assert.deepEqual(res, [ - { - t: { - i: 1 - } - } - ]); - conn.end(); - done(); - }) - .catch(done); - }) - .catch((err) => { - conn.end(); - done(err); - }); - }) - .catch(done); + await conn.query('INSERT INTO t(i) VALUES (1)'); + await conn.query({ rowsAsArray: true, sql: 'SELECT i, i FROM t' }); + const res = await conn.query('SELECT i, i FROM t'); + assert.deepEqual(res, [ + { + t: { + i: 1 + } + } + ]); + conn.end(); }); it('simple do 1 with callback', function (done) { diff --git a/test/integration/test-placholders.js b/test/integration/test-placholders.js index ed026e20..b6bdf15d 100644 --- a/test/integration/test-placholders.js +++ b/test/integration/test-placholders.js @@ -91,8 +91,19 @@ describe('Placeholder', () => { .catch(done); }); - it('query undefined named parameter', function (done) { - const 
handleResult = function (err) {
+  it('query undefined named parameter', async function () {
+    const conn = await base.createConnection({ namedPlaceholders: true });
+    await conn.query('DROP TABLE IF EXISTS undefinedParameter');
+    await conn.query('CREATE TABLE undefinedParameter (id int, id2 int, id3 int)');
+    try {
+      await conn.query('INSERT INTO undefinedParameter values (:param3, :param1, :param2)', {
+        param1: 1,
+        param3: 3,
+        param4: 4
+      });
+      conn.end();
+      throw new Error('must have thrown error!');
+    } catch (err) {
       assert.equal(err.errno, 45018);
       assert.equal(err.code, 'ER_PLACEHOLDER_UNDEFINED');
       assert.equal(err.sqlState, 'HY000');
       assert.ok(
         err.message.includes(
           "sql: INSERT INTO undefinedParameter values (:param3, :param1, :param2) - parameters:{'param1':1,'param3':3,'param4':4}"
         )
       );
-    };
-
-    base
-      .createConnection({ namedPlaceholders: true })
-      .then((conn) => {
-        conn
-          .query('DROP TABLE IF EXISTS undefinedParameter')
-          .then(() => {
-            return conn.query('CREATE TABLE undefinedParameter (id int, id2 int, id3 int)');
-          })
-          .then(() => {
-            return conn.query('INSERT INTO undefinedParameter values (:param3, :param1, :param2)', {
-              param1: 1,
-              param3: 3,
-              param4: 4
-            });
-          })
-          .then(() => {
-            done(new Error('must have thrown error!'));
-          })
-          .catch((err) => {
-            handleResult(err);
-            conn.end();
-            done();
-          });
-      })
-      .catch(done);
+      conn.end();
+    }
   });

   it('query missing placeholder parameter', function (done) {
       .catch(done);
   });

-  it('parameter last', (done) => {
+  it('parameter last', async () => {
     const value = "'`\\";
-    base
-      .createConnection({ namedPlaceholders: true })
-      .then((conn) => {
-        conn
-          .query('DROP TABLE IF EXISTS parse')
-          .then(() => {
-            return conn.query('CREATE TABLE parse(t varchar(128))');
-          })
-          .then(() => {
-            return conn.query('INSERT INTO `parse` value (:val)', { val: value });
-          })
-          .then(() => {
-            return conn.query('select * from `parse` where t = :val', { val: value });
-          })
-          .then((res) => {
-            assert.strictEqual(res[0].t, value);
-            conn.end();
-            done();
-          })
-          .catch(done);
-      })
-      .catch(done);
+    const conn = await base.createConnection({ namedPlaceholders: true });
+    await conn.query('DROP TABLE IF EXISTS parse');
+    await conn.query('CREATE TABLE parse(t varchar(128))');
+    await conn.beginTransaction();
+    await conn.query('INSERT INTO `parse` value (:val)', { val: value });
+    const res = await conn.query('select * from `parse` where t = :val', { val: value });
+    assert.strictEqual(res[0].t, value);
+    conn.end();
   });

   it('query with value without placeholder', function (done) {
diff --git a/test/integration/test-query-values-in-sql.js b/test/integration/test-query-values-in-sql.js
index eb3f3536..53f3da29 100644
--- a/test/integration/test-query-values-in-sql.js
+++ b/test/integration/test-query-values-in-sql.js
@@ -6,69 +6,38 @@ const { assert } = require('chai');
 describe('sql template strings', () => {
   const value = "'`\\";

-  it('query with parameters', (done) => {
-    base
-      .createConnection()
-      .then((conn) => {
-        conn
-          .query('DROP TABLE IF EXISTS query_with_parameter')
-          .then(() => {
-            return conn.query('CREATE TABLE query_with_parameter(t varchar(128))');
-          })
-          .then(() => {
-            return conn.query({
-              sql: 'INSERT INTO query_with_parameter value (?)',
-              values: [value]
-            });
-          })
-          .then(() => {
-            return conn.query({
-              sql: 'select * from query_with_parameter where t = ?',
-              values: [value]
-            });
-          })
-          .then((res) => {
-            assert.strictEqual(res[0].t, value);
-            
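// Annotation: this suite uses both call styles interchangeably -- conn.query(sql, values)
// and the object form conn.query({ sql, values }) shown here. The object form also lets
// per-query options ride along, e.g. (from the connection-option tests):
//   conn.query({ rowsAsArray: true, sql: 'SELECT * FROM t1, t2' })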
conn.end(); - done(); - }) - .catch(done); - }) - .catch(done); + it('query with parameters', async () => { + const conn = await base.createConnection(); + await conn.query('DROP TABLE IF EXISTS query_with_parameter'); + await conn.query('CREATE TABLE query_with_parameter(t varchar(128))'); + await conn.beginTransaction(); + await conn.query({ + sql: 'INSERT INTO query_with_parameter value (?)', + values: [value] + }); + const res = await conn.query({ + sql: 'select * from query_with_parameter where t = ?', + values: [value] + }); + assert.strictEqual(res[0].t, value); + conn.end(); }); - it('batch with parameters', (done) => { - base - .createConnection() - .then((conn) => { - conn - .query('DROP TABLE IF EXISTS batch_with_parameters') - .then(() => { - return conn.query('CREATE TABLE batch_with_parameters(t varchar(128))'); - }) - .then(() => { - return conn.batch({ - sql: 'INSERT INTO batch_with_parameters value (?)', - values: [value] - }); - }) - .then(() => { - return conn.query({ - sql: 'select * from batch_with_parameters where t = ?', - values: [value] - }); - }) - .then((res) => { - assert.strictEqual(res[0].t, value); - conn.end(); - done(); - }) - .catch((err) => { - conn.end(); - done(err); - }); - }) - .catch(done); + it('batch with parameters', async () => { + const conn = await base.createConnection(); + await conn.query('DROP TABLE IF EXISTS batch_with_parameters'); + await conn.query('CREATE TABLE batch_with_parameters(t varchar(128))'); + await conn.beginTransaction(); + await conn.batch({ + sql: 'INSERT INTO batch_with_parameters value (?)', + values: [value] + }); + const res = await conn.query({ + sql: 'select * from batch_with_parameters where t = ?', + values: [value] + }); + assert.strictEqual(res[0].t, value); + conn.end(); }); it('callback query with parameters', (done) => { @@ -87,32 +56,34 @@ describe('sql template strings', () => { conn.end(); done(err); } else { - conn.query( - { sql: 'INSERT INTO callback_with_parameters value (?)', values: [value] }, - (err) => { - if (err) { - conn.end(); - done(err); - } else { - conn.query( - { - sql: 'select * from callback_with_parameters where t = ?', - values: [value] - }, - (err, res) => { - if (err) { - conn.end(); - done(err); - } else { - assert.strictEqual(res[0].t, value); - conn.end(); - done(); + conn.beginTransaction(() => { + conn.query( + { sql: 'INSERT INTO callback_with_parameters value (?)', values: [value] }, + (err) => { + if (err) { + conn.end(); + done(err); + } else { + conn.query( + { + sql: 'select * from callback_with_parameters where t = ?', + values: [value] + }, + (err, res) => { + if (err) { + conn.end(); + done(err); + } else { + assert.strictEqual(res[0].t, value); + conn.end(); + done(); + } } - } - ); + ); + } } - } - ); + ); + }); } }); } @@ -137,32 +108,37 @@ describe('sql template strings', () => { conn.end(); done(err); } else { - conn.batch( - { sql: 'INSERT INTO callback_batch_with_parameters value (?)', values: [value] }, - (err) => { - if (err) { - conn.end(); - done(err); - } else { - conn.query( - { - sql: 'select * from callback_batch_with_parameters where t = ?', - values: [value] - }, - (err, res) => { - if (err) { - conn.end(); - done(err); - } else { - assert.strictEqual(res[0].t, value); - conn.end(); - done(); + conn.beginTransaction(() => { + conn.batch( + { + sql: 'INSERT INTO callback_batch_with_parameters value (?)', + values: [value] + }, + (err) => { + if (err) { + conn.end(); + done(err); + } else { + conn.query( + { + sql: 'select * from 
callback_batch_with_parameters where t = ?', + values: [value] + }, + (err, res) => { + if (err) { + conn.end(); + done(err); + } else { + assert.strictEqual(res[0].t, value); + conn.end(); + done(); + } } - } - ); + ); + } } - } - ); + ); + }); } }); } diff --git a/test/integration/test-query.js b/test/integration/test-query.js index af8d89bb..2ae0f7b1 100644 --- a/test/integration/test-query.js +++ b/test/integration/test-query.js @@ -20,181 +20,90 @@ describe('basic query', () => { .catch(done); }); - it('parameter last', (done) => { + it('parameter last', async () => { const value = "'`\\"; - base - .createConnection() - .then((conn) => { - conn - .query('DROP TABLE IF EXISTS parse') - .then(() => { - return conn.query('CREATE TABLE parse(t varchar(128))'); - }) - .then(() => { - return conn.query('INSERT INTO `parse` value (?)', value); - }) - .then(() => { - return conn.query('select * from `parse` where t = ?', value); - }) - .then((res) => { - assert.strictEqual(res[0].t, value); - conn.end(); - done(); - }) - .catch((err) => { - conn.end(); - done(err); - }); - }) - .catch(done); + const conn = await base.createConnection(); + await conn.query('DROP TABLE IF EXISTS parse'); + await conn.query('CREATE TABLE parse(t varchar(128))'); + await conn.beginTransaction(); + await conn.query('INSERT INTO `parse` value (?)', value); + const res = await conn.query('select * from `parse` where t = ?', value); + assert.strictEqual(res[0].t, value); + conn.end(); }); - it('array parameter', function (done) { - base - .createConnection() - .then((conn) => { - conn - .query('DROP TABLE IF EXISTS arrayParam') - .then(() => { - return conn.query('CREATE TABLE arrayParam (id int, val varchar(10))'); - }) - .then(() => { - return conn.query( - "INSERT INTO arrayParam VALUES (1, 'a'), (2, 'b'), (3, 'c'), (4, 'd')" - ); - }) - .then(() => { - return conn.query('SELECT * FROM arrayParam WHERE val IN (?)', [['b', 'c', 1]]); - }) - .then((rows) => { - assert.deepEqual(rows, [ - { - id: 2, - val: 'b' - }, - { - id: 3, - val: 'c' - } - ]); - conn.end(); - done(); - }) - .catch((err) => { - conn.end(); - done(err); - }); - }) - .catch(done); + it('array parameter', async function () { + const conn = await base.createConnection(); + await conn.query('DROP TABLE IF EXISTS arrayParam'); + await conn.query('CREATE TABLE arrayParam (id int, val varchar(10))'); + await conn.beginTransaction(); + await conn.query("INSERT INTO arrayParam VALUES (1, 'a'), (2, 'b'), (3, 'c'), (4, 'd')"); + const rows = await conn.query('SELECT * FROM arrayParam WHERE val IN (?)', [['b', 'c', 1]]); + assert.deepEqual(rows, [ + { + id: 2, + val: 'b' + }, + { + id: 3, + val: 'c' + } + ]); + conn.end(); }); - it('array parameter with null value', function (done) { - base - .createConnection() - .then((conn) => { - conn - .query('DROP TABLE IF EXISTS arrayParamNull') - .then(() => { - return conn.query('CREATE TABLE arrayParamNull (id int, val varchar(10))'); - }) - .then(() => { - return conn.query('INSERT INTO arrayParamNull VALUES (?)', [[1, null]]); - }) - .then(() => { - return conn.query('INSERT INTO arrayParamNull VALUES (?)', [[2, 'a']]); - }) - .then(() => { - return conn.query('SELECT * FROM arrayParamNull'); - }) - .then((rows) => { - assert.deepEqual(rows, [ - { - id: 1, - val: null - }, - { - id: 2, - val: 'a' - } - ]); - conn.end(); - done(); - }) - .catch((err) => { - conn.end(); - done(err); - }); - }) - .catch(done); + it('array parameter with null value', async function () { + const conn = await base.createConnection(); + 
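// Annotation: these array-parameter tests rely on the connector expanding an array bound
// to one placeholder into a value list, so the query above is effectively executed as
//   SELECT * FROM arrayParam WHERE val IN ('b','c',1)
// and the variants below check the same expansion when the array contains null.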
await conn.query('DROP TABLE IF EXISTS arrayParamNull'); + await conn.query('CREATE TABLE arrayParamNull (id int, val varchar(10))'); + await conn.beginTransaction(); + await conn.query('INSERT INTO arrayParamNull VALUES (?)', [[1, null]]); + await conn.query('INSERT INTO arrayParamNull VALUES (?)', [[2, 'a']]); + const rows = await conn.query('SELECT * FROM arrayParamNull'); + assert.deepEqual(rows, [ + { + id: 1, + val: null + }, + { + id: 2, + val: 'a' + } + ]); + conn.end(); }); - it('array parameter with null value with parenthesis', function (done) { - base - .createConnection({ arrayParenthesis: true }) - .then((conn) => { - conn - .query('DROP TABLE IF EXISTS arrayParamNullParen') - .then(() => { - return conn.query('CREATE TABLE arrayParamNullParen (id int, val varchar(10))'); - }) - .then(() => { - return conn.query('INSERT INTO arrayParamNullParen VALUES ?', [[1, null]]); - }) - .then(() => { - return conn.query('INSERT INTO arrayParamNullParen VALUES ?', [[2, 'a']]); - }) - .then(() => { - return conn.query('SELECT * FROM arrayParamNullParen'); - }) - .then((rows) => { - assert.deepEqual(rows, [ - { - id: 1, - val: null - }, - { - id: 2, - val: 'a' - } - ]); - conn.end(); - done(); - }) - .catch((err) => { - conn.end(); - done(err); - }); - }) - .catch(done); + it('array parameter with null value with parenthesis', async function () { + const conn = await base.createConnection({ arrayParenthesis: true }); + await conn.query('DROP TABLE IF EXISTS arrayParamNullParen'); + await conn.query('CREATE TABLE arrayParamNullParen (id int, val varchar(10))'); + await conn.beginTransaction(); + await conn.query('INSERT INTO arrayParamNullParen VALUES ?', [[1, null]]); + await conn.query('INSERT INTO arrayParamNullParen VALUES ?', [[2, 'a']]); + const rows = await conn.query('SELECT * FROM arrayParamNullParen'); + assert.deepEqual(rows, [ + { + id: 1, + val: null + }, + { + id: 2, + val: 'a' + } + ]); + conn.end(); }); - it('permitSetMultiParamEntries set', (done) => { + it('permitSetMultiParamEntries set', async () => { const jsonValue = { id: 1, val: 'test' }; - base - .createConnection({ permitSetMultiParamEntries: true }) - .then((conn) => { - conn - .query('DROP TABLE IF EXISTS setTable') - .then(() => { - return conn.query('CREATE TABLE setTable(id int, val varchar(128))'); - }) - .then(() => { - return conn.query('INSERT INTO setTable SET ?', jsonValue); - }) - .then(() => { - return conn.query('select * from setTable'); - }) - .then((res) => { - assert.deepEqual(res[0], jsonValue); - conn.end(); - done(); - }) - .catch((err) => { - conn.end(); - done(err); - }); - }) - .catch(done); + const conn = await base.createConnection({ permitSetMultiParamEntries: true }); + await conn.query('DROP TABLE IF EXISTS setTable'); + await conn.query('CREATE TABLE setTable (id int, val varchar(128))'); + await conn.beginTransaction(); + await conn.query('INSERT INTO setTable SET ?', jsonValue); + const res = await conn.query('select * from setTable'); + assert.deepEqual(res[0], jsonValue); + conn.end(); }); it('query with escape values', function (done) { @@ -301,7 +210,7 @@ describe('basic query', () => { .catch(done); }); - it('255 columns', (done) => { + it('255 columns', async () => { let table = 'CREATE TABLE myTable('; let insert = 'INSERT INTO myTable VALUES ('; let expRes = {}; @@ -317,51 +226,25 @@ describe('basic query', () => { table += ')'; insert += ')'; - base - .createConnection() - .then((conn) => { - conn - .query('DROP TABLE IF EXISTS myTable') - .then(() => { - return 
conn.query(table);
-          })
-          .then(() => {
-            return conn.query(insert);
-          })
-          .then(() => {
-            return conn.query('SELECT * FROM myTable');
-          })
-          .then((res) => {
-            assert.deepEqual(res[0], expRes);
-            conn.end();
-            done();
-          })
-          .catch((err) => {
-            conn.end();
-            done(err);
-          });
-      })
-      .catch(done);
+    const conn = await base.createConnection();
+    await conn.query('DROP TABLE IF EXISTS myTable');
+    await conn.query(table);
+    await conn.beginTransaction();
+    await conn.query(insert);
+    const res = await conn.query('SELECT * FROM myTable');
+    assert.deepEqual(res[0], expRes);
+    conn.end();
   });
 
-  it('escape validation', function (done) {
+  it('escape validation', async function () {
     if (!base.utf8Collation()) this.skip();
-    shareConn
-      .query('DROP TABLE IF EXISTS tt1')
-      .then(() => {
-        return shareConn.query('CREATE TABLE tt1 (id int, tt varchar(256)) CHARSET utf8mb4');
-      })
-      .then(() => {
-        return shareConn.query('INSERT INTO tt1 VALUES (?,?)', [1, 'jack\nkमस्']);
-      })
-      .then(() => {
-        return shareConn.query('SELECT * FROM tt1');
-      })
-      .then((res) => {
-        assert.equal(res[0].tt, 'jack\nkमस्');
-        done();
-      })
-      .catch(done);
+    await shareConn.query('DROP TABLE IF EXISTS tt1');
+    await shareConn.query('CREATE TABLE tt1 (id int, tt varchar(256)) CHARSET utf8mb4');
+    await shareConn.beginTransaction();
+    await shareConn.query('INSERT INTO tt1 VALUES (?,?)', [1, 'jack\nkमस्']);
+    const res = await shareConn.query('SELECT * FROM tt1');
+    assert.equal(res[0].tt, 'jack\nkमस्');
+    await shareConn.commit();
   });
 
   it('permitSetMultiParamEntries escape ', function (done) {
diff --git a/test/integration/test-typecast.js b/test/integration/test-typecast.js
index 086f8819..8f78e474 100644
--- a/test/integration/test-typecast.js
+++ b/test/integration/test-typecast.js
@@ -93,7 +93,7 @@ describe('TypeCast', () => {
       .catch(done);
   });
 
-  it('TINY(1) to boolean cast', function (done) {
+  it('TINY(1) to boolean cast', async function () {
     const tinyToBoolean = (column, next) => {
       if (column.type == 'TINY' && column.columnLength === 1) {
         const val = column.int();
@@ -101,36 +101,22 @@ describe('TypeCast', () => {
       }
       return next();
     };
-    base
-      .createConnection({ typeCast: tinyToBoolean })
-      .then((conn) => {
-        conn
-          .query('DROP TABLE IF EXISTS tinyToBool')
-          .then(() => {
-            return conn.query('CREATE TABLE tinyToBool(b1 TINYINT(1), b2 TINYINT(2))');
-          })
-          .then(() => {
-            return conn.query('INSERT INTO tinyToBool VALUES (0,0), (1,1), (2,2), (null,null)');
-          })
-          .then(() => {
-            return conn.query('SELECT * from tinyToBool');
-          })
-          .then((rows) => {
-            assert.deepEqual(rows, [
-              { b1: false, b2: 0 },
-              { b1: true, b2: 1 },
-              { b1: false, b2: 2 },
-              { b1: null, b2: null }
-            ]);
-            conn.end();
-            done();
-          })
-          .catch(done);
-      })
-      .catch(done);
+    const conn = await base.createConnection({ typeCast: tinyToBoolean });
+    await conn.query('DROP TABLE IF EXISTS tinyToBool');
+    await conn.query('CREATE TABLE tinyToBool(b1 TINYINT(1), b2 TINYINT(2))');
+    await conn.beginTransaction();
+    await conn.query('INSERT INTO tinyToBool VALUES (0,0), (1,1), (2,2), (null,null)');
+    const rows = await conn.query('SELECT * from tinyToBool');
+    assert.deepEqual(rows, [
+      { b1: false, b2: 0 },
+      { b1: true, b2: 1 },
+      { b1: false, b2: 2 },
+      { b1: null, b2: null }
+    ]);
+    conn.end();
   });
 
-  it('long cast', function (done) {
+  it('long cast', async function () {
     const longCast = (column, next) => {
       if (column.type == 'TINY' && column.columnLength === 1) {
         return column.long();
@@ -140,38 +126,24 @@ describe('TypeCast', () => {
       }
       return next();
     };
-    
base - .createConnection({ typeCast: longCast }) - .then((conn) => { - conn - .query('DROP TABLE IF EXISTS stupidCast') - .then(() => { - return conn.query('CREATE TABLE stupidCast(b1 TINYINT(1), b2 varchar(3))'); - }) - .then(() => { - return conn.query( - "INSERT INTO stupidCast VALUES (0,'0.1'), (1,'1.1')," + " (2,'2.2'), (null,null)" - ); - }) - .then(() => { - return conn.query('SELECT * from stupidCast'); - }) - .then((rows) => { - assert.deepEqual(rows, [ - { b1: 0, b2: 0.1 }, - { b1: 1, b2: 1.1 }, - { b1: 2, b2: 2.2 }, - { b1: null, b2: null } - ]); - conn.end(); - done(); - }) - .catch(done); - }) - .catch(done); + const conn = await base.createConnection({ typeCast: longCast }); + await conn.query('DROP TABLE IF EXISTS stupidCast'); + await conn.query('CREATE TABLE stupidCast(b1 TINYINT(1), b2 varchar(3))'); + await conn.beginTransaction(); + await conn.query( + "INSERT INTO stupidCast VALUES (0,'0.1'), (1,'1.1')," + " (2,'2.2'), (null,null)" + ); + const rows = await conn.query('SELECT * from stupidCast'); + assert.deepEqual(rows, [ + { b1: 0, b2: 0.1 }, + { b1: 1, b2: 1.1 }, + { b1: 2, b2: 2.2 }, + { b1: null, b2: null } + ]); + conn.end(); }); - it('date cast', function (done) { + it('date cast', async function () { const longCast = (column, next) => { if (column.type == 'VAR_STRING') { let da = column.date(); @@ -179,91 +151,62 @@ describe('TypeCast', () => { } return next(); }; - base - .createConnection({ typeCast: longCast }) - .then((conn) => { - conn - .query('DROP TABLE IF EXISTS stupidCast') - .then(() => { - return conn.query('CREATE TABLE stupidCast(b1 varchar(100))'); - }) - .then(() => { - return conn.query( - "INSERT INTO stupidCast VALUES ('1999-01-31" + - " 12:13:14.000'), ('1999-01-31 12:16:15'), (null)" - ); - }) - .then(() => { - return conn.query('SELECT * from stupidCast'); - }) - .then((rows) => { - assert.deepEqual(rows, [{ b1: 13 }, { b1: 16 }, { b1: null }]); - conn.end(); - done(); - }) - .catch(done); - }) - .catch(done); + const conn = await base.createConnection({ typeCast: longCast }); + await conn.query('DROP TABLE IF EXISTS stupidCast'); + await conn.query('CREATE TABLE stupidCast(b1 varchar(100))'); + await conn.beginTransaction(); + await conn.query( + "INSERT INTO stupidCast VALUES ('1999-01-31" + + " 12:13:14.000'), ('1999-01-31 12:16:15'), (null)" + ); + const rows = await conn.query('SELECT * from stupidCast'); + assert.deepEqual(rows, [{ b1: 13 }, { b1: 16 }, { b1: null }]); + conn.end(); }); - it('geometry cast', function (done) { + it('geometry cast', async function () { const longCast = (column, next) => { if (column.type == 'BINARY') { return column.geometry(); } return next(); }; - base - .createConnection({ typeCast: longCast }) - .then((conn) => { - conn - .query('DROP TABLE IF EXISTS stupidCast') - .then(() => { - return conn.query('CREATE TABLE stupidCast(b1 POINT)'); - }) - .then(() => { - return conn.query('INSERT INTO stupidCast VALUES (?), (?),(null)', [ - { - type: 'Point', - coordinates: [10, 10] - }, - { - type: 'Point', - coordinates: [20, 10] - } - ]); - }) - .then(() => { - return conn.query('SELECT * from stupidCast'); - }) - .then((rows) => { - assert.deepEqual(rows, [ - { - b1: { - type: 'Point', - coordinates: [10, 10] - } - }, - { - b1: { - type: 'Point', - coordinates: [20, 10] - } - }, - { - b1: - shareConn.info.isMariaDB() && - shareConn.info.hasMinVersion(10, 5, 2) && - !process.env.MAXSCALE_TEST_DISABLE - ? 
{ type: 'Point' } - : null - } - ]); - conn.end(); - done(); - }) - .catch(done); - }) - .catch(done); + const conn = await base.createConnection({ typeCast: longCast }); + await conn.query('DROP TABLE IF EXISTS stupidCast'); + await conn.query('CREATE TABLE stupidCast(b1 POINT)'); + await conn.query('INSERT INTO stupidCast VALUES (?), (?),(null)', [ + { + type: 'Point', + coordinates: [10, 10] + }, + { + type: 'Point', + coordinates: [20, 10] + } + ]); + const rows = await conn.query('SELECT * from stupidCast'); + assert.deepEqual(rows, [ + { + b1: { + type: 'Point', + coordinates: [10, 10] + } + }, + { + b1: { + type: 'Point', + coordinates: [20, 10] + } + }, + { + b1: + shareConn.info.isMariaDB() && + shareConn.info.hasMinVersion(10, 5, 2) && + !process.env.MAXSCALE_TEST_DISABLE + ? { type: 'Point' } + : null + } + ]); + conn.end(); }); }); From a22c211a8f31bdea3caadca4eb2eaf69cf48422b Mon Sep 17 00:00:00 2001 From: rusher Date: Fri, 4 Dec 2020 18:03:01 +0100 Subject: [PATCH 21/21] [misc] correcting CHANGELOG release date --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8dbdab62..2e4991f6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,7 @@ # Change Log -## [2.5.2](https://github.com/mariadb-corporation/mariadb-connector-nodejs/tree/2.5.2) (14 Dec 2020) +## [2.5.2](https://github.com/mariadb-corporation/mariadb-connector-nodejs/tree/2.5.2) (04 Dec 2020) [Full Changelog](https://github.com/mariadb-corporation/mariadb-connector-nodejs/compare/2.5.1...2.5.2) * [CONJS-151] bulk batch error (parameter truncation) #137
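
For illustration, the async/await test shape that the conversions above converge on reduces to the following minimal sketch. It assumes mocha and the suite's `base.createConnection()` helper; the require path and the `sketchTable` name are placeholders, not part of the actual patches:

    const assert = require('assert');
    const base = require('../base.js'); // assumed helper path

    describe('async query pattern (sketch)', () => {
      it('insert then select with parameters', async function () {
        const conn = await base.createConnection();
        try {
          await conn.query('DROP TABLE IF EXISTS sketchTable');
          await conn.query('CREATE TABLE sketchTable (t varchar(128))');
          // the converted tests group DML in an explicit transaction
          await conn.beginTransaction();
          await conn.query('INSERT INTO sketchTable value (?)', ['a']);
          const res = await conn.query('SELECT * FROM sketchTable WHERE t = ?', ['a']);
          assert.strictEqual(res[0].t, 'a');
          await conn.commit();
        } finally {
          conn.end();
        }
      });
    });

Compared with the promise-chain versions removed above, a rejected promise propagates straight to mocha, so the explicit `done(err)` / `.catch(done)` plumbing disappears.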