mirror of
https://github.com/knex/knex.git
synced 2025-07-23 00:42:45 +00:00

* Kill queries after timeout for PostgreSQL * Fix cancellation connection acquiring and test. * Fix releasing connection in case query cancellation fails * Add support for native enums on Postgres (#2632) Reference https://www.postgresql.org/docs/current/static/sql-createtype.html Closes #394 Signed-off-by: Will Soto <will.soto9@gmail.com> * Removal of 'skim' (#2520) * Allow overwriting log functions (#2625) * Example build of custom log functions * Handle logger object better for transactions * Adjust test to ignore sqlite warning message * Fixed onIn with empty values array (#2513) * Drop support for strong-oracle (#2487) * Remove babel-plugin-lodash (#2634) While in theory, this may reduce the bundle size, in practice it adds a ton of overhead during startup due to the number of additional requires. Bundle size also shouldn't matter for server side modules. * Add json support to the query builder for mysql (#2635) * Add json support to the query builder for mysql refs #1036 Based on #1902 * Clarify supported version * fix wrapIdentifier not being called in postgres alter column (#2612) * fix wrapIdentifier not being called in postgres alter column * add test for wrapIdentifier call in postgres alter column * add comment regarding issue * add issue & PR #'s in comment * Remove readable-stream and safe-buffer (#2640) * Add json support to the query builder for mysql refs #1036 Based on #1902 * Clarify supported version * Use native Buffer and Stream implementations * fixes 2630 (#2642) * Timeout errors shouldn't silently ignore the passed errors, but rather reject with original error. 
Fixes #2582 (#2626) * chore: add Node.js 10 (#2594) * chore: add Node.js 10 * chore: trigger new build * chore: update lockfile * chore: trigger new build * fix: use npm i instead of npm ci * Fix mssql driver crashing (#2637) * Run SQL Server tests locally running SQL server in docker * WIP mssql test stuff * Patched MSSQL driver to not crash knex anymore * Removed semicolon from rollback stmt for oracle (#2564) * Remove WebSQL dialect (#2647) * Add json support to the query builder for mysql refs #1036 Based on #1902 * Clarify supported version * Use native Buffer and Stream implementations * Remove WebSQL dialect * add homepage field to package.json (#2650) * Make the stream catch errors in the query (#2638) * Make the stream catch errors in the query * Fix another case in which stream doesnt emits error * Linter stuff * Remove setTimeout in tests * Make a test not to check the MySQL error code * Fix stream error catching for MariaDB and PostgreSQL * Fix stream error catching in Oracle * Throw the error after emitting it to the stream * Throw the error without instantiating a new Error * Various fixes to mssql dialect (#2653) * Fixed float type of mssql to be float * Many tests where postgres test was not actually ran at all * Migrations to be mssql compatible Mssql driver doesn't handle if multiple queries are sent to same transaction concurrently. 
* Prevented mssql failing when invalid schema builder was executed by accident Instead of trying to generate sql from broken schema calls, just make exception to leak before query compiling is started * Fixed mssql trx rollback to always throw an error Also modified some connection test query to be mssql compatible * Fixed various bugs from MSSQL driver to make tests run * Fixed mssql unique index to be compatible with other dialect implementations * Enable running mssql tests on CI * Test for #2588 * Updated tests to not be dependend on tables left from previous test rans * Trying to make mssql server work on travis * Updated changelog and version * Drop mariadb support (#2681) * Dropped support for mariasql * ESLint * Fixed docs to build again * Fix knex.initialize() and adds test (#2477) * Fix knex.initialize() and adds test * Fix knex.initialize() and adds test * Added test for reinitializing pool after destroy * Fixed destroy / reinitialization test * Fixed the fix * Implement missing schema support for mssql dialect * chore: Update dependencies. Remove estraverse (#2691) * Update dependencies. 
Remove estraverse * Fix compatibility with new Sinon * Increase mssql timeout * Normalized and validated driverNames of test db clients and fixed oracle test setup (#2692) * Normalized and validated driverNames of test db clients and fixed oracle test setup * Fixed failed queries from old query building tests which hadn't been ran in ages * Allow selecting node version which is used to run oracledb docker tests * Improved sql tester error messages * Fixed rest of the oracledb tests * Removed invalid flag from docker-compose * Print mssql logs if initialization fails * Fixed syntax error + final tests * Added restart of failure for mssql DB initialization to try again if server was not ready * Printout always mssql logs after container is started * Fixed wait time printing after trying to connect * Use npm run oracledb:test for testing oracle in travis * Add Prettier (#2697) * Add prettier * Run files through prettier * Istanbul -> NYC (#2700) * istanbul -> nyc * Update mocha * Enforce code coverage (#2702) * Enforce code coverage * Update coverage numbers with current values * version assignment on base class, copy on tx client, fix #2705 * Update changelog * v0.15.1 * Added build step to test script. Fixes #2541 (#2712) * Revert "Added build step to test script. Fixes #2541 (#2712)" (#2714) This reverts commit 90ed8db58053b859a6bdc45a17f2f510ce8a3411. * Allow oracle failures for now * Fix issue with select(0) (#2711) * Fix issue with select(0). 
Fixes #2658 * Refactor migrator (#2695) * Refactor migrator * Fix exports * Fix ESLint * Fix migrator * Fix reference to config * Split some more * Fix table builder * Fix argument order * Merge branch 'master' of https://github.com/tgriesser/knex into feature/2690-support-multiple-migration-dirs # Conflicts: # src/migrate/index.js # test/index.js # test/unit/migrate/migrator.js * Fix #2715 (#2721) * Fix #2715, explicitly set precision in datetime & timestamp * Allow for precision in knex.fn.now, mysql time * Add test for datetime with precision * Bump changelog * 0.15.2 * Fix issues with warnPromise when migration does not return a promise. Fixes #2725 (#2730) * Add tests for multiple union arguments with callbacks and builders (#2749) * Add tests for multiple union arguments * Add some callback and raw tests to union unit tests * Compile with before update so that bindings are put in correct order (#2733) * Fix join using builder withSchema. (#2744) * Use Datetime2 for MSSQL datetime + timestamp types (#2757) * Use Datetime2 for MSSQL datetime + timestamp types Datetime2 is now the recommended column type for new date work: https://docs.microsoft.com/en-us/sql/t-sql/data-types/datetime-transact-sql?view=sql-server-2017 * Add tests for MSSQL datetime2 changes * General/document breaking change (#2774) * Add changelog entry for a breaking change * Improve entry * Allow timestamp with timezone on mssql databases (#2724) * Allow timestamp with timezone on mssql databases * Change timestamp parameter to use object instead of boolean * Update dependencies (#2772) * Feature/2690: Multiple migration directories (#2735) * Implement support for multiple migration directories * Add tests for new functionality * Fix tape tests * Pass migration directory upwards * Fix multiple directory support * Fix bugs * Rollback correctly * Fix remaining tests * Address comments * #2758: Implement fail-fast logic for dialect resolution (#2776) * Implement fail-fast logic for dialect 
resolution, clean-up code around. * Remove method that was deprecated long time ago * Address additional comments * Try addressing comments * Set client explicitly * Fix compatibility with older Node versions * Use columnize instead of wrap in using(). (#2713) * Use columnize instead of wrap in using(). This is an attempt to fix #2136. Also added an integration test, couldn't find any existing. * Exclude MSSQL from test as it does not support .using * Change test to not use subquery * Change test * Introduced abstraction for getting migrations (#2775) * Introduced abstraction for getting migrations This would allow a webpack migration source which is compatible with bundling. * Fixed migration validation with custom migration source * Fixed issues after rebasing on muti directory PR * Renamed parameter and fixed error message * Addressed some PR comments * Finished comment * Moved filename extension filtering into fs-migrator * Added test showing in memory custom migration source * Cleaned up how to get config * Fixed failing test * Hopefully fix tests * Fix Node.js 10 support in tests * Test for correctly releasing cancel query connection
1163 lines
38 KiB
JavaScript
1163 lines
38 KiB
JavaScript
/*global describe, expect, it*/
|
|
/*eslint no-var:0, max-len:0 */
|
|
'use strict';
|
|
|
|
var Knex = require('../../../knex');
|
|
var _ = require('lodash');
|
|
var Promise = require('bluebird');
|
|
|
|
module.exports = function(knex) {
|
|
describe('Additional', function() {
|
|
// Exercises the `postProcessResponse` hook: a user-supplied function that may
// rewrite every query result (and receives the per-query context) before the
// result is handed back to the caller. Covers plain queries, raw queries, and
// queries running inside a transaction.
describe('Custom response processing', () => {
  before('setup custom response handler', () => {
    // Tag each response so the tests below can detect that the hook ran
    // exactly once, and capture whatever queryContext was attached.
    knex.client.config.postProcessResponse = (response, queryContext) => {
      response.callCount = response.callCount ? response.callCount + 1 : 1;
      response.queryContext = queryContext;
      return response;
    };
  });

  after('restore client configuration', () => {
    // Remove the hook so later suites see the default behavior.
    knex.client.config.postProcessResponse = null;
  });

  it('should process normal response', () => {
    return knex('accounts')
      .limit(1)
      .then((res) => {
        // Hook must have run exactly once for a plain builder query.
        expect(res.callCount).to.equal(1);
      });
  });

  it('should pass query context to the custom handler', () => {
    return knex('accounts')
      .queryContext('the context')
      .limit(1)
      .then((res) => {
        // The context set via .queryContext() is forwarded to the hook.
        expect(res.queryContext).to.equal('the context');
      });
  });

  it('should process raw response', () => {
    return knex.raw('select * from ??', ['accounts']).then((res) => {
      expect(res.callCount).to.equal(1);
    });
  });

  it('should pass query context for raw responses', () => {
    return knex
      .raw('select * from ??', ['accounts'])
      .queryContext('the context')
      .then((res) => {
        expect(res.queryContext).to.equal('the context');
      });
  });

  it('should process response done in transaction', () => {
    return knex
      .transaction((trx) => {
        return trx('accounts')
          .limit(1)
          .then((res) => {
            // Hook runs once inside the transaction...
            expect(res.callCount).to.equal(1);
            return res;
          });
      })
      .then((res) => {
        // ...and is NOT re-applied when the transaction resolves.
        expect(res.callCount).to.equal(1);
      });
  });

  it('should pass query context for responses from transactions', () => {
    return knex
      .transaction((trx) => {
        return trx('accounts')
          .queryContext('the context')
          .limit(1)
          .then((res) => {
            expect(res.queryContext).to.equal('the context');
            return res;
          });
      })
      .then((res) => {
        expect(res.queryContext).to.equal('the context');
      });
  });
});
|
|
|
|
// Verifies that columnInfo() honors both identifier-mapping hooks:
// `wrapIdentifier` snake_cases identifiers on their way into SQL, and
// `postProcessResponse` camelCases the column names coming back out, so the
// same table can be addressed by either naming convention.
describe('columnInfo with wrapIdentifier and postProcessResponse', () => {
  before('setup hooks', () => {
    // Rewrite every key of the result to camelCase.
    knex.client.config.postProcessResponse = (response) => {
      return _.mapKeys(response, (val, key) => {
        return _.camelCase(key);
      });
    };

    // Snake_case identifiers before delegating to the default wrapping.
    knex.client.config.wrapIdentifier = (id, origImpl) => {
      return origImpl(_.snakeCase(id));
    };
  });

  after('restore client configuration', () => {
    knex.client.config.postProcessResponse = null;
    knex.client.config.wrapIdentifier = null;
  });

  it('should work using camelCased table name', () => {
    return knex('testTableTwo')
      .columnInfo()
      .then((res) => {
        // Column names come back camelCased regardless of the stored names.
        expect(Object.keys(res)).to.have.all.members([
          'id',
          'accountId',
          'details',
          'status',
          'jsonData',
        ]);
      });
  });

  it('should work using snake_cased table name', () => {
    return knex('test_table_two')
      .columnInfo()
      .then((res) => {
        expect(Object.keys(res)).to.have.all.members([
          'id',
          'accountId',
          'details',
          'status',
          'jsonData',
        ]);
      });
  });
});
|
|
|
|
// TODO: This doesn't work on oracle yet.
if (['pg', 'mssql'].includes(knex.client.driverName)) {
  // Verifies that `returning(...)` round-trips correctly when both hooks are
  // installed: wrapIdentifier strips a suffix from identifiers going into
  // SQL, and postProcessResponse re-adds it to row keys coming back out.
  // (Fixed: the describe title previously ended with a stray backtick.)
  describe('returning with wrapIdentifier and postProcessResponse', () => {
    let origHooks = {};

    before('setup custom hooks', () => {
      // Stash whatever hooks are currently installed so `after` can
      // restore them instead of blindly nulling them out.
      origHooks.postProcessResponse = knex.client.config.postProcessResponse;
      origHooks.wrapIdentifier = knex.client.config.wrapIdentifier;

      // Add `_foo` to each identifier.
      knex.client.config.postProcessResponse = (res) => {
        if (Array.isArray(res)) {
          return res.map((it) => {
            if (typeof it === 'object') {
              return _.mapKeys(it, (value, key) => {
                return key + '_foo';
              });
            } else {
              return it;
            }
          });
        } else {
          return res;
        }
      };

      // Remove `_foo` from the end of each identifier.
      knex.client.config.wrapIdentifier = (id) => {
        return id.substring(0, id.length - 4);
      };
    });

    after('restore hooks', () => {
      knex.client.config.postProcessResponse = origHooks.postProcessResponse;
      knex.client.config.wrapIdentifier = origHooks.wrapIdentifier;
    });

    it('should return the correct column when a single property is given to returning', () => {
      return knex('accounts_foo')
        .insert({ balance_foo: 123 })
        .returning('balance_foo')
        .then((res) => {
          // Single-column returning yields the bare values, not row objects.
          expect(res).to.eql([123]);
        });
    });

    it('should return the correct columns when multiple properties are given to returning', () => {
      return knex('accounts_foo')
        .insert({ balance_foo: 123, email_foo: 'foo@bar.com' })
        .returning(['balance_foo', 'email_foo'])
        .then((res) => {
          // Multi-column returning yields row objects with suffixed keys.
          expect(res).to.eql([
            { balance_foo: 123, email_foo: 'foo@bar.com' },
          ]);
        });
    });
  });
}
|
|
|
|
// Knex query builders are thenable bluebird promises, so bluebird's .get(n)
// (index into the resolved array) should be forwarded.
it('should forward the .get() function from bluebird', function() {
  return knex('accounts')
    .select()
    .limit(1)
    .then(function(accounts) {
      var firstAccount = accounts[0];
      return knex('accounts')
        .select()
        .limit(1)
        .get(0)
        .then(function(account) {
          // Previously `expect(account.id == firstAccount.id)` — a bare
          // expect() that could never fail. Assert the equality explicitly.
          expect(account.id).to.equal(firstAccount.id);
        });
    });
});
|
|
|
|
// bluebird's .mapSeries should run the mapper strictly in order even though
// each task resolves after a random delay.
it('should forward the .mapSeries() function from bluebird', function() {
  // Resolves with an increasing counter after a random (<200ms) delay;
  // with .mapSeries the counters must still arrive in ascending order.
  var asyncTask = function() {
    return new Promise(function(resolve) {
      var output = asyncTask.num++;
      setTimeout(function() {
        resolve(output);
      }, Math.random() * 200);
    });
  };
  asyncTask.num = 1;

  var returnedValues = [];
  return knex('accounts')
    .select()
    .limit(3)
    .mapSeries(function() {
      return asyncTask().then(function(number) {
        returnedValues.push(number);
      });
    })
    .then(function() {
      // Previously three bare `expect(x == y)` no-ops that could never
      // fail. Assert each position explicitly.
      expect(returnedValues[0]).to.equal(1);
      expect(returnedValues[1]).to.equal(2);
      expect(returnedValues[2]).to.equal(3);
    });
});
|
|
|
|
// bluebird's .delay(ms) should hold the resolved value for at least ms.
it('should forward the .delay() function from bluebird', function() {
  var startTime = new Date().valueOf();
  return knex('accounts')
    .select()
    .limit(1)
    .delay(300)
    .then(function() {
      // Previously a bare `expect(elapsed > 300)` no-op that could never
      // fail. Allow a few ms of timer jitter while still proving that the
      // delay actually happened.
      expect(new Date().valueOf() - startTime).to.be.at.least(295);
    });
});
|
|
|
|
// truncate() must empty the table and, on dialects that support it, reset
// the auto-increment sequence so the next insert gets id 1 again.
it('should truncate a table with truncate', function() {
  return knex('test_table_two')
    .truncate()
    .testSql(function(tester) {
      // Each dialect compiles truncate differently (sqlite3 has no
      // TRUNCATE and falls back to DELETE).
      tester('mysql', 'truncate `test_table_two`');
      tester('pg', 'truncate "test_table_two" restart identity');
      tester('pg-redshift', 'truncate "test_table_two"');
      tester('sqlite3', 'delete from `test_table_two`');
      tester('oracledb', 'truncate table "test_table_two"');
      tester('mssql', 'truncate table [test_table_two]');
    })
    .then(() => {
      return knex('test_table_two')
        .select('*')
        .then((resp) => {
          // The table must now be empty.
          expect(resp).to.have.length(0);
        });
    })
    .then(() => {
      // Insert new data after truncate and make sure ids restart at 1.
      // This doesn't currently work on oracle, where the created sequence
      // needs to be manually reset.
      // On redshift, one would need to create an entirely new table and do
      // `insert into ... (select ...); alter table rename...`
      if (
        /oracle/i.test(knex.client.driverName) ||
        /redshift/i.test(knex.client.driverName)
      ) {
        return;
      }
      return knex('test_table_two')
        .insert({ status: 1 })
        .then((res) => {
          return knex('test_table_two')
            .select('id')
            .first()
            .then((res) => {
              expect(res).to.be.an('object');
              expect(res.id).to.equal(1);
            });
        });
    });
});
|
|
|
|
// knex.raw(sql) should execute the statement verbatim; use each dialect's
// own "list the tables" query as the probe.
it('should allow raw queries directly with `knex.raw`', function() {
  var tablesQueryByDriver = {
    mysql: 'SHOW TABLES',
    mysql2: 'SHOW TABLES',
    pg:
      "SELECT table_name FROM information_schema.tables WHERE table_schema='public'",
    'pg-redshift':
      "SELECT table_name FROM information_schema.tables WHERE table_schema='public'",
    sqlite3: "SELECT name FROM sqlite_master WHERE type='table';",
    oracledb: 'select TABLE_NAME from USER_TABLES',
    mssql:
      "SELECT table_name FROM information_schema.tables WHERE table_schema='dbo'",
  };
  var driver = knex.client.driverName;
  var sql = tablesQueryByDriver[driver];
  return knex.raw(sql).testSql(function(tester) {
    // The compiled SQL must be exactly what was passed in.
    tester(driver, sql);
  });
});
|
|
|
|
// A raw fragment used as the "table" argument must be inlined verbatim
// (no quoting or escaping applied).
it('should allow using the primary table as a raw statement', function() {
  var built = knex(knex.raw('raw_table_name')).toQuery();
  expect(built).to.equal('select * from raw_table_name');
});
|
|
|
|
// knex.fn.* helpers should produce Raw statements; knex.fn.now() compiles
// to CURRENT_TIMESTAMP, optionally with a precision argument.
it('should allow using .fn-methods to create raw statements', function() {
  // Previously `expect(a.prototype === b.prototype)` — a no-op: instances
  // have no own `.prototype`, so both sides were undefined and the bare
  // expect() could never fail. Assert that both helpers build the same
  // kind of Raw object instead.
  expect(Object.getPrototypeOf(knex.fn.now())).to.equal(
    Object.getPrototypeOf(knex.raw())
  );
  expect(knex.fn.now().toQuery()).to.equal('CURRENT_TIMESTAMP');
  expect(knex.fn.now(6).toQuery()).to.equal('CURRENT_TIMESTAMP(6)');
});
|
|
|
|
// columnInfo() with no argument returns a map of column name -> metadata
// (defaultValue, maxLength, nullable, type). Each tester pins both the
// introspection SQL a dialect issues and the normalized metadata expected
// for the `enum_value` and `uuid` columns of `datatype_test`.
it('gets the columnInfo', function() {
  return knex('datatype_test')
    .columnInfo()
    .testSql(function(tester) {
      tester(
        'mysql',
        'select * from information_schema.columns where table_name = ? and table_schema = ?',
        null,
        {
          enum_value: {
            defaultValue: null,
            maxLength: 1,
            nullable: true,
            type: 'enum',
          },
          uuid: {
            defaultValue: null,
            maxLength: 36,
            nullable: false,
            type: 'char',
          },
        }
      );
      tester(
        'pg',
        'select * from information_schema.columns where table_name = ? and table_catalog = ? and table_schema = current_schema()',
        null,
        {
          enum_value: {
            defaultValue: null,
            maxLength: null,
            nullable: true,
            type: 'text',
          },
          uuid: {
            defaultValue: null,
            maxLength: null,
            nullable: false,
            type: 'uuid',
          },
        }
      );
      tester(
        'pg-redshift',
        'select * from information_schema.columns where table_name = ? and table_catalog = ? and table_schema = current_schema()',
        null,
        {
          enum_value: {
            defaultValue: null,
            maxLength: 255,
            nullable: true,
            type: 'character varying',
          },
          uuid: {
            defaultValue: null,
            maxLength: 36,
            nullable: false,
            type: 'character',
          },
        }
      );
      // NOTE: sqlite3 reports maxLength as the string '36', not a number.
      tester('sqlite3', 'PRAGMA table_info(`datatype_test`)', [], {
        enum_value: {
          defaultValue: null,
          maxLength: null,
          nullable: true,
          type: 'text',
        },
        uuid: {
          defaultValue: null,
          maxLength: '36',
          nullable: false,
          type: 'char',
        },
      });
      tester(
        'oracledb',
        "select * from xmltable( '/ROWSET/ROW'\n passing dbms_xmlgen.getXMLType('\n select char_col_decl_length, column_name, data_type, data_default, nullable\n from user_tab_columns where table_name = ''datatype_test'' ')\n columns\n CHAR_COL_DECL_LENGTH number, COLUMN_NAME varchar2(200), DATA_TYPE varchar2(106),\n DATA_DEFAULT clob, NULLABLE varchar2(1))",
        [],
        {
          enum_value: {
            defaultValue: null,
            nullable: true,
            maxLength: 1,
            type: 'VARCHAR2',
          },
          uuid: {
            defaultValue: null,
            nullable: false,
            maxLength: 36,
            type: 'CHAR',
          },
        }
      );
      tester(
        'mssql',
        "select * from information_schema.columns where table_name = ? and table_catalog = ? and table_schema = 'dbo'",
        ['datatype_test', 'knex_test'],
        {
          enum_value: {
            defaultValue: null,
            maxLength: 100,
            nullable: true,
            type: 'nvarchar',
          },
          uuid: {
            defaultValue: null,
            maxLength: null,
            nullable: false,
            type: 'uniqueidentifier',
          },
        }
      );
    });
});
|
|
|
|
// columnInfo('uuid') narrows the result to a single column: the metadata
// object is returned directly instead of keyed by column name. The
// introspection SQL is the same as the no-argument form.
it('gets the columnInfo with columntype', function() {
  return knex('datatype_test')
    .columnInfo('uuid')
    .testSql(function(tester) {
      tester(
        'mysql',
        'select * from information_schema.columns where table_name = ? and table_schema = ?',
        null,
        {
          defaultValue: null,
          maxLength: 36,
          nullable: false,
          type: 'char',
        }
      );
      tester(
        'pg',
        'select * from information_schema.columns where table_name = ? and table_catalog = ? and table_schema = current_schema()',
        null,
        {
          defaultValue: null,
          maxLength: null,
          nullable: false,
          type: 'uuid',
        }
      );
      tester(
        'pg-redshift',
        'select * from information_schema.columns where table_name = ? and table_catalog = ? and table_schema = current_schema()',
        null,
        {
          defaultValue: null,
          maxLength: 36,
          nullable: false,
          type: 'character',
        }
      );
      // NOTE: sqlite3 reports maxLength as the string '36', not a number.
      tester('sqlite3', 'PRAGMA table_info(`datatype_test`)', [], {
        defaultValue: null,
        maxLength: '36',
        nullable: false,
        type: 'char',
      });
      tester(
        'oracledb',
        "select * from xmltable( '/ROWSET/ROW'\n passing dbms_xmlgen.getXMLType('\n select char_col_decl_length, column_name, data_type, data_default, nullable\n from user_tab_columns where table_name = ''datatype_test'' ')\n columns\n CHAR_COL_DECL_LENGTH number, COLUMN_NAME varchar2(200), DATA_TYPE varchar2(106),\n DATA_DEFAULT clob, NULLABLE varchar2(1))",
        [],
        {
          defaultValue: null,
          maxLength: 36,
          nullable: false,
          type: 'CHAR',
        }
      );
      tester(
        'mssql',
        "select * from information_schema.columns where table_name = ? and table_catalog = ? and table_schema = 'dbo'",
        null,
        {
          defaultValue: null,
          maxLength: null,
          nullable: false,
          type: 'uniqueidentifier',
        }
      );
    });
});
|
|
|
|
// Regression test for #2184: columnInfo() must quote the table name so a
// reserved word like `group` can still be introspected.
it('#2184 - should properly escape table name for SQLite columnInfo', function() {
  if (knex.client.driverName !== 'sqlite3') {
    return; // only relevant for the sqlite3 PRAGMA-based introspection
  }

  var expectedInfo = {
    foo: {
      type: 'integer',
      maxLength: null,
      nullable: true,
      defaultValue: null,
    },
  };

  // Recreate a table whose name is a reserved word, then introspect it.
  return knex.schema
    .dropTableIfExists('group')
    .then(function() {
      return knex.schema.createTable('group', function(t) {
        t.integer('foo');
      });
    })
    .then(function() {
      return knex('group').columnInfo();
    })
    .then(function(columnInfo) {
      expect(columnInfo).to.deep.equal(expectedInfo);
    });
});
|
|
|
|
// renameColumn must preserve the table's data: seed 40 rows, rename
// `about` -> `about_col`, verify the row count is unchanged and the new
// name is selectable, then rename back.
it('should allow renaming a column', function() {
  // Dialects key the count differently: oracledb uppercases the aggregate,
  // and mssql returns it under an empty-string key.
  var countColumn;
  switch (knex.client.driverName) {
    case 'oracledb':
      countColumn = 'COUNT(*)';
      break;
    case 'mssql':
      countColumn = '';
      break;
    default:
      countColumn = 'count(*)';
      break;
  }
  var count,
    inserts = [];
  _.times(40, function(i) {
    inserts.push({
      email: 'email' + i,
      first_name: 'Test',
      last_name: 'Data',
    });
  });
  return knex('accounts')
    .insert(inserts)
    .then(function() {
      return knex.count('*').from('accounts');
    })
    .then(function(resp) {
      // Remember the pre-rename row count for later comparison.
      count = resp[0][countColumn];
      return knex.schema
        .table('accounts', function(t) {
          t.renameColumn('about', 'about_col');
        })
        .testSql(function(tester) {
          tester('mysql', ['show fields from `accounts` where field = ?']);
          tester('pg', [
            'alter table "accounts" rename "about" to "about_col"',
          ]);
          tester('pg-redshift', [
            'alter table "accounts" rename "about" to "about_col"',
          ]);
          tester('sqlite3', ['PRAGMA table_info(`accounts`)']);
          // Oracle needs a PL/SQL block to also rewrite the autoincrement
          // trigger if the renamed column drives it.
          tester('oracledb', [
            'DECLARE PK_NAME VARCHAR(200); IS_AUTOINC NUMBER := 0; BEGIN EXECUTE IMMEDIATE (\'ALTER TABLE "accounts" RENAME COLUMN "about" TO "about_col"\'); SELECT COUNT(*) INTO IS_AUTOINC from "USER_TRIGGERS" where trigger_name = \'accounts_autoinc_trg\'; IF (IS_AUTOINC > 0) THEN SELECT cols.column_name INTO PK_NAME FROM all_constraints cons, all_cons_columns cols WHERE cons.constraint_type = \'P\' AND cons.constraint_name = cols.constraint_name AND cons.owner = cols.owner AND cols.table_name = \'accounts\'; IF (\'about_col\' = PK_NAME) THEN EXECUTE IMMEDIATE (\'DROP TRIGGER "accounts_autoinc_trg"\'); EXECUTE IMMEDIATE (\'create or replace trigger "accounts_autoinc_trg" BEFORE INSERT on "accounts" for each row declare checking number := 1; begin if (:new."about_col" is null) then while checking >= 1 loop select "accounts_seq".nextval into :new."about_col" from dual; select count("about_col") into checking from "accounts" where "about_col" = :new."about_col"; end loop; end if; end;\'); end if; end if;END;',
          ]);
          tester('mssql', ["exec sp_rename ?, ?, 'COLUMN'"]);
        });
    })
    .then(function() {
      return knex.count('*').from('accounts');
    })
    .then(function(resp) {
      // No rows lost by the rename.
      expect(resp[0][countColumn]).to.equal(count);
    })
    .then(function() {
      // The new column name must be selectable (would reject otherwise).
      return knex('accounts').select('about_col');
    })
    .then(function() {
      // Rename back so later tests see the original schema.
      return knex.schema.table('accounts', function(t) {
        t.renameColumn('about_col', 'about');
      });
    })
    .then(function() {
      return knex.count('*').from('accounts');
    })
    .then(function(resp) {
      expect(resp[0][countColumn]).to.equal(count);
    });
});
|
|
|
|
// dropColumn must remove exactly one column while preserving all rows:
// capture the row count, drop `first_name`, verify the remaining column
// set and that the count is unchanged.
it('should allow dropping a column', function() {
  // Dialects key the count differently: oracledb uppercases the aggregate,
  // and mssql returns it under an empty-string key.
  var countColumn;
  switch (knex.client.driverName) {
    case 'oracledb':
      countColumn = 'COUNT(*)';
      break;
    case 'mssql':
      countColumn = '';
      break;
    default:
      countColumn = 'count(*)';
      break;
  }
  var count;
  return knex
    .count('*')
    .from('accounts')
    .then(function(resp) {
      count = resp[0][countColumn];
    })
    .then(function() {
      return knex.schema
        .table('accounts', function(t) {
          t.dropColumn('first_name');
        })
        .testSql(function(tester) {
          tester('mysql', ['alter table `accounts` drop `first_name`']);
          tester('pg', ['alter table "accounts" drop column "first_name"']);
          tester('pg-redshift', [
            'alter table "accounts" drop column "first_name"',
          ]);
          tester('sqlite3', ['PRAGMA table_info(`accounts`)']);
          tester('oracledb', [
            'alter table "accounts" drop ("first_name")',
          ]);
          //tester('oracledb', ['alter table "accounts" drop ("first_name")']);
          tester('mssql', [
            'ALTER TABLE [accounts] DROP COLUMN [first_name]',
          ]);
        });
    })
    .then(function() {
      return knex
        .select('*')
        .from('accounts')
        .first();
    })
    .then(function(resp) {
      // `first_name` must be gone; every other column must remain.
      expect(_.keys(resp).sort()).to.eql([
        'about',
        'balance',
        'created_at',
        'email',
        'id',
        'last_name',
        'logins',
        'phone',
        'updated_at',
      ]);
    })
    .then(function() {
      return knex.count('*').from('accounts');
    })
    .then(function(resp) {
      // No rows lost by the drop.
      expect(resp[0][countColumn]).to.equal(count);
    });
});
|
|
|
|
// A plain .timeout(ms) must reject with a TimeoutError (carrying the
// configured timeout and a descriptive message) once the query outlives
// the limit. Each dialect gets its own "sleep for 1 second" statement.
it('.timeout() should throw TimeoutError', function() {
  var driverName = knex.client.driverName;
  if (driverName === 'sqlite3') {
    return;
  } //TODO -- No built-in support for sleeps
  if (/redshift/.test(driverName)) {
    return;
  }
  var testQueries = {
    pg: function() {
      return knex.raw('SELECT pg_sleep(1)');
    },
    mysql: function() {
      return knex.raw('SELECT SLEEP(1)');
    },
    mysql2: function() {
      return knex.raw('SELECT SLEEP(1)');
    },
    mssql: function() {
      return knex.raw("WAITFOR DELAY '00:00:01'");
    },
    oracledb: function() {
      return knex.raw('begin dbms_lock.sleep(1); end;');
    },
  };

  // Fail loudly if a new dialect is added without a sleep query here.
  if (!testQueries.hasOwnProperty(driverName)) {
    throw new Error('Missing test query for driver: ' + driverName);
  }

  var query = testQueries[driverName]();

  return query
    .timeout(200)
    .then(function() {
      // The 1s sleep must not outrun the 200ms timeout.
      expect(true).to.equal(false);
    })
    .catch(function(error) {
      expect(_.pick(error, 'timeout', 'name', 'message')).to.deep.equal({
        timeout: 200,
        name: 'TimeoutError',
        message:
          'Defined query timeout of 200ms exceeded when running query.',
      });
    });
});
|
|
|
|
// .timeout(ms, {cancel: true}) must both reject with TimeoutError AND kill
// the query server-side; afterwards the server's process list is inspected
// to prove the sleep is really gone.
it('.timeout(ms, {cancel: true}) should throw TimeoutError and cancel slow query', function() {
  var driverName = knex.client.driverName;
  if (driverName === 'sqlite3') {
    return;
  } //TODO -- No built-in support for sleeps
  if (/redshift/.test(driverName)) {
    return;
  }

  // There's unexpected behavior caused by knex releasing a connection back
  // to the pool because of a timeout when a long query is still running.
  // A subsequent query will acquire the connection (still in-use) and hang
  // until the first query finishes. Setting a sleep time longer than the
  // mocha timeout exposes this behavior.
  var testQueries = {
    pg: function() {
      return knex.raw('SELECT pg_sleep(10)');
    },
    mysql: function() {
      return knex.raw('SELECT SLEEP(10)');
    },
    mysql2: function() {
      return knex.raw('SELECT SLEEP(10)');
    },
    mssql: function() {
      return knex.raw("WAITFOR DELAY '00:00:10'");
    },
    oracledb: function() {
      return knex.raw('begin dbms_lock.sleep(10); end;');
    },
  };

  if (!testQueries.hasOwnProperty(driverName)) {
    throw new Error('Missing test query for driverName: ' + driverName);
  }

  var query = testQueries[driverName]();

  function addTimeout() {
    return query.timeout(200, { cancel: true });
  }

  // Only mysql/postgres query cancelling supported for now
  if (
    !_.startsWith(driverName, 'mysql') &&
    !_.startsWith(driverName, 'pg')
  ) {
    // Unsupported dialects must refuse synchronously.
    expect(addTimeout).to.throw(
      'Query cancelling not supported for this dialect'
    );
    return;
  }

  // Per-dialect query to list the server's running processes/queries.
  var getProcessesQueries = {
    pg: function() {
      return knex.raw('SELECT * from pg_stat_activity');
    },
    mysql: function() {
      return knex.raw('SHOW PROCESSLIST');
    },
    mysql2: function() {
      return knex.raw('SHOW PROCESSLIST');
    },
  };

  if (!getProcessesQueries.hasOwnProperty(driverName)) {
    throw new Error('Missing test query for driverName: ' + driverName);
  }

  var getProcessesQuery = getProcessesQueries[driverName]();

  return addTimeout()
    .then(function() {
      // The sleep must not complete before the 200ms timeout.
      expect(true).to.equal(false);
    })
    .catch(function(error) {
      expect(_.pick(error, 'timeout', 'name', 'message')).to.deep.equal({
        timeout: 200,
        name: 'TimeoutError',
        message:
          'Defined query timeout of 200ms exceeded when running query.',
      });

      // Ensure sleep command is removed.
      // This query will hang if a connection gets released back to the pool
      // too early.
      // 50ms delay since killing query doesn't seem to have immediate effect to the process listing
      return Promise.resolve()
        .then()
        .delay(50)
        .then(function() {
          return getProcessesQuery;
        })
        .then(function(results) {
          var processes;
          var sleepProcess;

          // pg returns rows keyed by `query`; mysql's SHOW PROCESSLIST
          // comes back as results[0] keyed by `Info`.
          if (_.startsWith(driverName, 'pg')) {
            processes = results.rows;
            sleepProcess = _.find(processes, { query: query.toString() });
          } else {
            processes = results[0];
            sleepProcess = _.find(processes, {
              Info: 'SELECT SLEEP(10)',
            });
          }
          // The cancelled sleep must no longer be running.
          expect(sleepProcess).to.equal(undefined);
        });
    });
});
|
|
|
|
// When cancellation itself cannot obtain a connection to issue the KILL
// (pool exhausted by the very query being cancelled), the timeout must
// surface a distinct "cancelling of query failed" TimeoutError.
it('.timeout(ms, {cancel: true}) should throw error if cancellation cannot acquire connection', function() {
  // Only mysql/postgres query cancelling supported for now
  var driverName = knex.client.driverName;
  if (
    !_.startsWith(driverName, 'mysql') &&
    !_.startsWith(driverName, 'pg')
  ) {
    return;
  }

  // To make this test easier, I'm changing the pool settings to max 1.
  // Also setting acquireTimeoutMillis to lower as not to wait the default time
  var knexConfig = _.cloneDeep(knex.client.config);
  knexConfig.pool.min = 0;
  knexConfig.pool.max = 1;
  knexConfig.pool.acquireTimeoutMillis = 100;

  // A separate knex instance so the starved pool doesn't affect other tests.
  var knexDb = new Knex(knexConfig);

  // NOTE(review): the mssql/oracle entries below are unreachable — the
  // guard above returns for anything but mysql*/pg*, and the key is
  // 'oracle' while driverName elsewhere is 'oracledb'. Kept as-is.
  var testQueries = {
    pg: function() {
      return knexDb.raw('SELECT pg_sleep(10)');
    },
    mysql: function() {
      return knexDb.raw('SELECT SLEEP(10)');
    },
    mysql2: function() {
      return knexDb.raw('SELECT SLEEP(10)');
    },
    mssql: function() {
      return knexDb.raw("WAITFOR DELAY '00:00:10'");
    },
    oracle: function() {
      return knexDb.raw('begin dbms_lock.sleep(10); end;');
    },
  };

  if (!testQueries.hasOwnProperty(driverName)) {
    throw new Error('Missing test query for dialect: ' + driverName);
  }

  var query = testQueries[driverName]();

  return query
    .timeout(1, { cancel: true })
    .then(function() {
      throw new Error("Shouldn't have gotten here.");
    })
    .catch(function(error) {
      expect(_.pick(error, 'timeout', 'name', 'message')).to.deep.equal({
        timeout: 1,
        name: 'TimeoutError',
        message:
          'After query timeout of 1ms exceeded, cancelling of query failed.',
      });
    })
    // Always tear down the extra pool, pass or fail.
    .finally(() => knexDb.destroy());
});
|
|
|
|
it('.timeout(ms, {cancel: true}) should release connections after failing if connection cancellation throws an error', function() {
  // This cancellation-failure scenario is only exercised for postgres.
  const driverName = knex.client.driverName;
  if (!_.startsWith(driverName, 'pg')) {
    return;
  }

  // Small pool plus a short acquire timeout: if the failed cancellation leaked
  // its connection, the follow-up query below would starve instead of running.
  const knexConfig = _.cloneDeep(knex.client.config);
  knexConfig.pool.min = 0;
  knexConfig.pool.max = 2;
  knexConfig.pool.acquireTimeoutMillis = 100;

  const knexDb = new Knex(knexConfig);

  const rawTestQueries = {
    pg: (sleepSeconds) => `SELECT pg_sleep(${sleepSeconds})`,
  };

  if (!rawTestQueries.hasOwnProperty(driverName)) {
    throw new Error('Missing test query for driverName: ' + driverName);
  }

  const getTestQuery = (sleepSeconds = 10) =>
    knexDb.raw(rawTestQueries[driverName](sleepSeconds));

  // Force the cancellation path itself to fail by swapping the kill query
  // for one that is invalid SQL.
  const knexPrototype = Object.getPrototypeOf(knexDb.client);
  const originalWrappedCancelQueryCall = knexPrototype._wrappedCancelQueryCall;
  knexPrototype._wrappedCancelQueryCall = (conn) =>
    knexPrototype.query(conn, {
      method: 'raw',
      sql: 'TestError',
    });

  const queryTimeout = 10;
  const secondQueryTimeout = 11;

  return getTestQuery()
    .timeout(queryTimeout, { cancel: true })
    .then(function() {
      throw new Error("Shouldn't have gotten here.");
    })
    .catch(function(error) {
      // The broken kill query makes the failure a plain pg 'error', not a
      // TimeoutError.
      expect(_.pick(error, 'timeout', 'name', 'message')).to.deep.equal({
        timeout: queryTimeout,
        name: 'error',
        message: `After query timeout of ${queryTimeout}ms exceeded, cancelling of query failed.`,
      });
    })
    .then(() => {
      // Restore real cancellation and prove a connection can still be acquired.
      knexPrototype._wrappedCancelQueryCall = originalWrappedCancelQueryCall;
      return getTestQuery().timeout(secondQueryTimeout, { cancel: true });
    })
    .catch(function(error) {
      expect(_.pick(error, 'timeout', 'name', 'message')).to.deep.equal({
        timeout: secondQueryTimeout,
        name: 'TimeoutError',
        message: `Defined query timeout of ${secondQueryTimeout}ms exceeded when running query.`,
      });
    })
    .finally(() => {
      return knexDb.destroy();
    });
});
|
|
|
|
it('Event: query-response', function() {
  let queryCount = 0;

  // Shared listener: validates the event payload shape and counts invocations.
  const onQueryResponse = (response, obj, builder) => {
    queryCount++;
    expect(response).to.be.an('array');
    expect(obj).to.be.an('object');
    expect(obj.__knexUid).to.be.a('string');
    expect(obj.__knexQueryUid).to.be.a('string');
    expect(builder).to.be.an('object');
  };
  knex.on('query-response', onQueryResponse);

  return knex('accounts')
    .select()
    .on('query-response', onQueryResponse)
    .then(function() {
      return knex.transaction(function(tr) {
        return tr('accounts')
          .select()
          .on('query-response', onQueryResponse); //Transactions should emit the event as well
      });
    })
    .then(function() {
      knex.removeListener('query-response', onQueryResponse);
      // Two queries, each observed by both the knex-level and the
      // builder-level listener.
      expect(queryCount).to.equal(4);
    });
});
|
|
|
|
it('Event: does not duplicate listeners on a copy with user params', function() {
  let queryCount = 0;

  // Shared listener: validates the event payload shape and counts invocations.
  const onQueryResponse = (response, obj, builder) => {
    queryCount++;
    expect(response).to.be.an('array');
    expect(obj).to.be.an('object');
    expect(obj.__knexUid).to.be.a('string');
    expect(obj.__knexQueryUid).to.be.a('string');
    expect(builder).to.be.an('object');
  };

  knex.on('query-response', onQueryResponse);
  const knexCopy = knex.withUserParams({});

  return knexCopy('accounts')
    .select()
    .on('query-response', onQueryResponse)
    .then(function() {
      return knexCopy.transaction(function(tr) {
        return tr('accounts')
          .select()
          .on('query-response', onQueryResponse); //Transactions should emit the event as well
      });
    })
    .then(function() {
      // The listener registered on the original instance must not have been
      // copied onto the withUserParams() clone.
      expect(Object.keys(knex._events).length).to.equal(1);
      expect(Object.keys(knexCopy._events).length).to.equal(0);
      knex.removeListener('query-response', onQueryResponse);
      expect(Object.keys(knex._events).length).to.equal(0);
      expect(queryCount).to.equal(4);
    });
});
|
|
|
|
it('Event: query-error', function() {
  let queryCountKnex = 0;
  let queryCountBuilder = 0;

  // Payload assertions shared by the instance-level and builder-level listeners.
  const assertQueryErrorPayload = (error, obj) => {
    expect(obj).to.be.an('object');
    expect(obj.__knexUid).to.be.a('string');
    expect(obj.__knexQueryUid).to.be.a('string');
    expect(error).to.be.an('error');
  };

  const onQueryErrorKnex = (error, obj) => {
    queryCountKnex++;
    assertQueryErrorPayload(error, obj);
  };

  const onQueryErrorBuilder = (error, obj) => {
    queryCountBuilder++;
    assertQueryErrorPayload(error, obj);
  };

  knex.on('query-error', onQueryErrorKnex);

  return knex
    .raw('Broken query')
    .on('query-error', onQueryErrorBuilder)
    .then(function() {
      expect(true).to.equal(false); //Should not be resolved
    })
    .catch(function() {
      knex.removeListener('query-error', onQueryErrorKnex);
      knex.removeListener('query-error', onQueryErrorBuilder);
      // Each listener must have fired exactly once for the failing query.
      expect(queryCountBuilder).to.equal(1);
      expect(queryCountKnex).to.equal(1);
    });
});
|
|
|
|
it('Event: start', function() {
  return knex('accounts')
    .insert({ last_name: 'Start event test' })
    .then(function() {
      const queryBuilder = knex('accounts').select();

      // The 'start' event fires before compilation, so the builder can still
      // be narrowed down to a single matching row from inside the listener.
      queryBuilder.on('start', (builder) => {
        builder.where('last_name', 'Start event test').first();
      });

      return queryBuilder;
    })
    .then(function(row) {
      expect(row).to.exist;
      expect(row.last_name).to.equal('Start event test');
    });
});
|
|
|
|
it("Event 'query' should not emit native sql string", function() {
  const builder = knex('accounts')
    .where('id', 1)
    .select();

  builder.on('query', (obj) => {
    const compiled = builder.toSQL();
    const nativeSql = compiled.toNative().sql;

    // Only assert when the dialect actually produces a distinct native string
    // (e.g. MariaDB compiles to the same sql either way).
    if (nativeSql === compiled.sql) {
      return;
    }
    expect(obj.sql).to.not.equal(builder.toSQL().toNative().sql);
    expect(obj.sql).to.equal(builder.toSQL().sql);
  });

  return builder;
});
|
|
|
|
describe('async stack traces', function() {
  // Toggle the feature on for this suite only; restore afterwards.
  before(() => {
    knex.client.config.asyncStackTraces = true;
  });
  after(() => {
    delete knex.client.config.asyncStackTraces;
  });

  it('should capture stack trace on query builder instantiation', () =>
    knex('some_nonexisten_table')
      .select()
      .catch((err) => {
        // the index 1 might need adjustment if the code is refactored
        expect(err.stack.split('\n')[1]).to.match(
          /at Function\.queryBuilder \(/
        );
        expect(typeof err.originalStack).to.equal('string');
      }));

  it('should capture stack trace on raw query', () =>
    knex.raw('select * from some_nonexisten_table').catch((err) => {
      // the index 2 might need adjustment if the code is refactored
      expect(err.stack.split('\n')[2]).to.match(/at Function\.raw \(/);
      expect(typeof err.originalStack).to.equal('string');
    }));

  it('should capture stack trace on schema builder', () =>
    knex.schema
      .renameTable('some_nonexisten_table', 'whatever')
      .catch((err) => {
        // the index 1 might need adjustment if the code is refactored
        expect(err.stack.split('\n')[1]).to.match(/client\.schemaBuilder/);
        expect(typeof err.originalStack).to.equal('string');
      }));
});
|
|
|
|
it('Overwrite knex.logger functions using config', () => {
  const knexConfig = _.clone(knex.client.config);

  let callCount = 0;
  // Each custom log function asserts it received the expected message and
  // bumps the counter so we can verify all four were routed through config.log.
  const assertCall = function(expectedMessage, message) {
    expect(message).to.equal(expectedMessage);
    callCount++;
  };

  knexConfig.log = {
    warn: assertCall.bind(null, 'test'),
    error: assertCall.bind(null, 'test'),
    debug: assertCall.bind(null, 'test'),
    deprecate: assertCall.bind(
      null,
      'test is deprecated, please use test2'
    ),
  };

  //Sqlite warning message
  knexConfig.useNullAsDefault = true;

  const knexDb = new Knex(knexConfig);

  knexDb.client.logger.warn('test');
  knexDb.client.logger.error('test');
  knexDb.client.logger.debug('test');
  knexDb.client.logger.deprecate('test', 'test2');

  expect(callCount).to.equal(4);

  // Bug fix: the throwaway instance was previously never destroyed, leaking
  // its connection pool for the rest of the test run. Tear it down like the
  // other tests that construct their own Knex instance.
  return knexDb.destroy();
});
|
|
});
|
|
};
|