feat: Created a mini Node.js server with Newman for testing without the Postman GUI.

This will mimic a run in a CI/CD environment or Docker container.
This commit is contained in:
Simon Priet
2021-09-08 14:01:19 +02:00
parent 5fbd7c88fa
commit e69a613a37
5610 changed files with 740417 additions and 3 deletions
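
For reference, a minimal sketch of the kind of GUI-less run this commit enables, using the vendored newman package's programmatic API (the collection path below is a hypothetical placeholder, not part of this commit):

// sketch: run a Postman collection from a plain Node.js script, e.g. inside CI or Docker
const newman = require('newman');

newman.run({
    collection: require('./postman/collection.json'), // hypothetical path
    reporters: ['cli']
}, function (err, summary) {
    if (err) { throw err; }
    console.log('run complete, failed assertions:', summary.run.failures.length);
});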

node_modules/newman/lib/config/index.js generated vendored Normal file

@@ -0,0 +1,64 @@
var _ = require('lodash'),
async = require('async'),
env = require('./process-env'),
rcfile = require('./rc-file');
/**
* Reads configuration from config file, environment variables and CLI arguments. The CLI arguments override environment
* variables and environment variables override the configuration read from a file.
*
* @param {Object} overrides - Configuration overrides (these usually come from the CLI).
* @param {Object} options - The wrapper object of settings used for selective configuration loading.
* @param {String} options.command - Command name. Used for loading the required options from the config file.
* @param {Boolean=} options.ignoreRcFile - If true, the RC file is ignored.
* @param {Boolean=} options.ignoreProcessEnvironment - If true, the process environment variables are ignored.
* @param {Object=} options.loaders - Custom loaders for specific configuration options.
* @param {Function} callback - Is called after merging values from the overrides with the values from the rc file and
* environment variables.
* @returns {*}
*/
module.exports.get = (overrides, options, callback) => {
!callback && _.isFunction(options) && (callback = options, options = {});
var loaders = options.loaders,
commonOptions = _.pick(overrides, ['postmanApiKey']);
async.waterfall([
// Load RC Files.
!options.ignoreRcFile ? rcfile.load : (cb) => {
return cb(null, {});
},
// Load Process Environment overrides
(fileOptions, cb) => {
fileOptions[options.command] && (fileOptions = fileOptions[options.command]);
return cb(null, _.merge(fileOptions, options.ignoreProcessEnvironment ? {} : env));
}
], (err, options) => {
if (err) {
return callback(err);
}
options = _.mergeWith({}, options, overrides, (dest, src) => {
// If the newer value is a null, do not override it.
return (src === null) ? dest : undefined;
});
if (_.isEmpty(loaders)) {
return callback(null, options);
}
// sanitize environment option
if (!options.environment) {
options.environment = {};
}
// sanitize globals option
if (!options.globals) {
options.globals = {};
}
async.mapValues(options, (value, name, cb) => {
return (value && _.isFunction(loaders[name])) ? loaders[name](value, commonOptions, cb) : cb(null, value);
}, callback);
});
};
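
Purely for illustration, a hedged sketch of how this internal loader might be invoked with CLI-style overrides (the option values and internal require path are assumptions):

// sketch: merge .newmanrc values, process-env values and CLI overrides (overrides win)
var config = require('newman/lib/config'); // internal module path, assumption

config.get({ bail: true, reporters: ['cli'] }, { command: 'run' }, function (err, options) {
    if (err) { return console.error(err); }
    console.log(options); // fully merged run options
});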

node_modules/newman/lib/config/process-env.js generated vendored Normal file

@@ -0,0 +1,3 @@
var envConfig = {}; // todo: read NEWMAN_* variables from process.env
module.exports = envConfig;

node_modules/newman/lib/config/rc-file.js generated vendored Normal file

@@ -0,0 +1,59 @@
/* eslint-disable no-process-env */
var _ = require('lodash'),
fs = require('fs'),
join = require('path').join,
async = require('async'),
util = require('../util'),
liquidJSON = require('liquid-json'),
/**
* Name of the directory that contains the file denoted by FILE_NAME.
*
* @type {String}
*/
POSTMAN_CONFIG_DIR = 'postman',
/**
* Name of the file that contains Newman compliant configuration information.
*
* @type {String}
*/
FILE_NAME = 'newmanrc';
/**
* Configuration loader to acquire run settings from a file present in the home directory: POSTMAN_CONFIG_DIR/FILE_NAME.
*
* @param {Function} callback - The callback function invoked to mark the completion of the config loading routine.
* @returns {*}
*/
module.exports.load = (callback) => {
var iswin = (/^win/).test(process.platform),
home = iswin ? process.env.USERPROFILE : process.env.HOME,
configFiles = [];
!iswin && configFiles.push(join('/etc', POSTMAN_CONFIG_DIR, FILE_NAME));
home && configFiles.push(join(home, '.' + POSTMAN_CONFIG_DIR, FILE_NAME));
configFiles.push(join(process.cwd(), '.' + FILE_NAME));
async.mapSeries(configFiles, (path, cb) => {
fs.readFile(path, (err, data) => {
if (err) {
return cb(null, {}); // err masked to avoid overpopulating terminal with missing .newmanrc messages
}
data && data.toString && (data = data.toString(util.detectEncoding(data)).trim());
try {
return cb(null, liquidJSON.parse(data));
}
catch (e) {
return cb(_.set(e, 'help', `The file at ${path} contains invalid data.`));
}
});
}, (err, files) => {
if (err) {
return callback(err);
}
return callback(null, _.merge.apply(this, files));
});
};
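
The loader above looks for, in order, /etc/postman/newmanrc, ~/.postman/newmanrc and ./.newmanrc, each containing relaxed JSON keyed by command name (lib/config/index.js then picks fileOptions[options.command]). A hypothetical ~/.postman/newmanrc with example values:

{
    "run": {
        "bail": true,
        "timeoutRequest": 30000,
        "reporters": ["cli"]
    }
}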

node_modules/newman/lib/index.js generated vendored Normal file

@@ -0,0 +1,3 @@
module.exports = {
run: require('./run')
};

node_modules/newman/lib/node-version-check/index.js generated vendored Normal file

@@ -0,0 +1,30 @@
var semver = require('semver'),
colors = require('colors/safe'),
pkg = require('../../package.json'),
/**
* The required node version from package.json.
*
* @type {String}
* @readOnly
*/
requiredNodeVersion = pkg && pkg.engines && pkg.engines.node,
/**
* The current node version as detected from running process.
*
* @type {String}
* @readOnly
*/
currentNodeVersion = process && process.version;
// if either current or required version is not detected, we bail out
if (!(requiredNodeVersion && currentNodeVersion)) {
return;
}
// we check semver satisfaction and throw error on mismatch
if (!semver.satisfies(currentNodeVersion, requiredNodeVersion)) {
console.error([colors.red('newman:'), 'required node version', requiredNodeVersion].join(' '));
process.exit(1);
}

node_modules/newman/lib/print/index.js generated vendored Normal file

@@ -0,0 +1,149 @@
var format = require('util').format,
cliUtils = require('../reporters/cli/cli-utils'),
SPC = ' ',
BS = '\b',
LF = '\n',
WAIT_FRAMES = (/^win/).test(process.platform) ?
['\u2015', '\\', '|', '/'] :
['⠄', '⠆', '⠇', '⠋', '⠙', '⠸', '⠰', '⠠', '⠰', '⠸', '⠙', '⠋', '⠇', '⠆'],
WAIT_FRAMES_SIZE = WAIT_FRAMES.length - 1,
WAIT_FRAMERATE = 100,
print;
/**
* Function that prints to stdout using standard NodeJS util.format, without end newline.
*
* @returns {print} - The result of context bound call to the run context, with all arguments passed.
* @chainable
*/
print = function () {
return print.print.apply(this, arguments);
};
/**
* Function that prints to stdout using standard NodeJS util.format, without end newline.
*
* @returns {print} - The updated print module wrapper, with unwait and unbuffer methods invoked.
* @chainable
*/
print.print = function () {
print.waiting && print.unwait();
print._buffer && print.unbuffer();
process.stdout.write(format.apply(this, arguments));
return print;
};
/**
* Print with a line feed at the end.
*
* @returns {print} - The updated print module wrapper, with unwait and unbuffer methods invoked.
* @chainable
*/
print.lf = function () {
print.waiting && print.unwait();
print._buffer && print.unbuffer();
process.stdout.write(format.apply(this, arguments) + LF);
return print;
};
// store the starting frame during wait
print._waitPosition = 0;
/**
* Draw a spinner until next print statement is received.
*
* @param {Function=} color - Optional color function from `colors` module.
* @returns {print} - The print module wrapper, with a key set as a setInterval label.
* @chainable
*/
print.wait = function (color) {
print.unwait();
if (cliUtils.noTTY()) {
return print;
}
process.stdout.write(SPC);
print.waiting = setInterval(function () {
process.stdout.write(BS +
(color ? color(WAIT_FRAMES[print._waitPosition++]) : WAIT_FRAMES[print._waitPosition++]));
(print._waitPosition > WAIT_FRAMES_SIZE) && (print._waitPosition = 0); // move frame
}, WAIT_FRAMERATE);
return print;
};
/**
* Stops a running spinner on CLI. It is automatically taken care of in most cases.
*
* @returns {print} - Returns the updated print module wrapper, with the cleared waiting label.
* @chainable
* @see print.wait
*/
print.unwait = function () {
if (print.waiting) {
print.waiting = clearInterval(print.waiting);
print._waitPosition = 0;
process.stdout.write('\b');
}
return print;
};
print._buffer = undefined;
/**
* Prints a message between start and end text. Subsequent buffer calls do not print the start text; any other
* unbuffered call, or a delay, prints the end text.
*
* @param {String} startText - The text to begin printing with.
* @param {String} endText - The text that marks the end of the print routine.
* @returns {print} - The updated print module wrapper with internal buffer and buffering members redefined.
* @chainable
*/
print.buffer = function (startText, endText) {
(print._buffer === undefined) && process.stdout.write(startText);
print._buffer = endText;
print._buferring && (print._buferring = clearTimeout(print._buferring));
print._buferring = setTimeout(print.unbuffer, 500);
process.stdout.write(format.apply(this, Array.prototype.splice.call(arguments, 2)));
return print;
};
/**
* Prints text without flushing the buffer.
*
* @returns {print} - The updated print module wrapper with unwait called.
* @chainable
* @see print.buffer
*/
print.nobuffer = function () {
print.unwait();
process.stdout.write(format.apply(this, arguments));
return print;
};
/**
* Flushes the buffer.
*
* @returns {print} - The print module wrapper with internal private members: buffer, and buffering redefined.
* @chainable
* @see print.buffer
*/
print.unbuffer = function () {
print._buferring && (print._buferring = clearTimeout(print._buferring));
if (print._buffer) {
process.stdout.write(print._buffer);
print._buffer = undefined;
}
return print;
};
module.exports = print;
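
A short, illustrative usage of this print helper (not part of the vendored file; the internal require path is an assumption):

// illustrative: print a request line, spin while waiting, then finish the line
var print = require('newman/lib/print');

print('GET https://example.com ').wait();   // writes the text, then animates a spinner
setTimeout(function () {
    print.lf('[200 OK]');                    // the next print call stops the spinner; .lf adds a newline
}, 1000);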

node_modules/newman/lib/reporters/cli/cli-utils-symbols.js generated vendored Normal file

@@ -0,0 +1,79 @@
var IS_WINDOWS = (/^win/).test(process.platform),
subsets,
symbols;
/**
* A set of symbol groups for use in different situations: regular, windows friendly unicode, and plain text.
*
* @type {Object}
*/
subsets = {
regular: {
console: {
top: '┌',
middle: '│',
bottom: '└'
},
dot: '.',
folder: '❏',
root: '→',
sub: '↳',
ok: '✓',
error: '✖',
star: '★',
up: '↑',
down: '↓'
},
encoded: {
console: {
top: '\u250C',
middle: '\u2502',
bottom: '\u2514'
},
dot: '.',
folder: '\u25A1',
root: '\u2192',
sub: '\u2514',
ok: '\u221A',
error: '\u00D7',
star: '\u2605',
up: '\u2191',
down: '\u2193'
},
plainText: {
console: {
top: '-',
middle: '|',
bottom: '-'
},
dot: '.',
folder: 'Folder',
root: 'Root',
sub: 'Sub-folder',
ok: 'Pass',
error: 'Fail',
star: '*',
up: '^',
down: 'v'
}
};
/**
* A method that picks the appropriate set of CLI report symbols under a given set of run conditions.
*
* @param {Boolean} disableUnicode - A flag to force plain text equivalents for CLI symbols if set to true.
* @returns {Object} - The right set of symbols from subsets for the given conditions.
* @todo Add additional parameter related to temp file read - writes
*/
symbols = function (disableUnicode) {
if (disableUnicode) {
return subsets.plainText;
}
if (IS_WINDOWS) { // modify symbols for windows platforms
return subsets.encoded;
}
return subsets.regular;
};
module.exports = symbols;
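
Illustrative usage (the internal require path is an assumption; the flag would normally come from newman's --disable-unicode option):

var symbols = require('newman/lib/reporters/cli/cli-utils-symbols'),
    s = symbols(false);

console.log(s.ok + ' status code is 200'); // "✓ ..." (or "√ ..." on Windows); symbols(true) falls back to "Pass ..."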

node_modules/newman/lib/reporters/cli/cli-utils.js generated vendored Normal file

@@ -0,0 +1,185 @@
var inspect = require('util').inspect,
wrap = require('word-wrap'),
symbols = require('./cli-utils-symbols'),
cliUtils;
// set styling for inspect options
inspect.styles.string = 'grey';
inspect.styles.name = 'white';
cliUtils = {
/**
* A helper method that picks the right set of symbols for the given set of run conditions.
*
* @type {Function}
*/
symbols: symbols,
/**
* A set of blank CLI table symbols (default).
*
* @type {Object}
*/
cliTableTemplate_Blank: {
top: '',
'top-mid': '',
'top-left': '',
'top-right': '',
bottom: '',
'bottom-mid': '',
'bottom-left': '',
'bottom-right': '',
middle: '',
mid: ' ',
'mid-mid': '',
'mid-left': '',
'mid-right': '',
left: '',
'left-mid': '',
'left-left': '',
'left-right': '',
right: '',
'right-mid': '',
'right-left': '',
'right-right': ''
},
/**
* A set of fallback CLI table construction symbols, used when unicode has been disabled.
*
* @type {Object}
*/
cliTableTemplateFallback: {
top: '-',
'top-mid': '-',
'top-left': '-',
'top-right': '-',
bottom: '-',
'bottom-mid': '-',
'bottom-left': '-',
'bottom-right': '-',
middle: '|',
mid: '-',
'mid-mid': '+',
'mid-left': '-',
'mid-right': '-',
left: '|',
'left-mid': '-',
'left-left': '-',
'left-right': '-',
right: '|',
'right-mid': '-',
'right-left': '-',
'right-right': '-'
},
/**
* A CLI utility helper method that performs left padding on an input string.
*
* @param {String} nr - The string to be padded.
* @param {Number} n - The length of the field, in which to left pad the input string.
* @param {String=} str - An optional string used for padding the input string. Defaults to '0'.
* @returns {String} - The resultant left padded string.
*/
padLeft: function (nr, n, str) {
return Array(n - String(nr).length + 1).join(str || '0') + nr;
},
/**
* A CLI utility helper method that checks for the non TTY compliance of the current run environment.
*
* color: | noTTY:
* 'on' -> false
* 'off' -> true
* otherwise -> Based on isTTY.
*
* @param {String} color - A flag to indicate usage of the --color option.
* @returns {Boolean} - A boolean value depicting the result of the noTTY check.
*/
noTTY: function (color) {
return (color === 'off') || (color !== 'on') && (!process.stdout.isTTY);
},
/**
* A CLI utility helper method that generates a color inspector function for CLI reports.
*
* @param {Object} runOptions - The set of run options acquired via the runner.
* @returns {Function} - A function to perform utils.inspect, given a sample item, under pre-existing options.
*/
inspector: function (runOptions) {
var dimension = cliUtils.dimension(),
options = {
depth: 25,
maxArrayLength: 100, // only supported in Node v6.1.0 and up: https://github.com/nodejs/node/pull/6334
colors: !cliUtils.noTTY(runOptions.color),
// note that similar dimension calculation is in utils.wrapper
// only supported in Node v6.3.0 and above: https://github.com/nodejs/node/pull/7499
breakLength: ((dimension.exists && (dimension.width > 20)) ? dimension.width : 60) - 16
};
return function (item) {
return inspect(item, options);
};
},
/**
* A CLI utility helper method to provide content wrapping functionality for CLI reports.
*
* @returns {Function} - A sub-method to wrap content, given a piece of text, and indent value.
*/
wrapper: function () {
var dimension = cliUtils.dimension(),
// note that similar dimension calculation is in utils.wrapper
width = ((dimension.exists && (dimension.width > 20)) ? dimension.width : 60) - 6;
return function (text, indent) {
return wrap(text, {
indent: indent,
width: width,
cut: true
});
};
},
/**
* A CLI utility helper method to compute and scale the size of the CLI table to be displayed.
*
* @returns {Object} - A set of properties: width, height, and TTY existence.
*/
dimension: function () {
var tty,
width,
height;
try { tty = require('tty'); }
catch (e) { tty = null; }
if (tty && tty.isatty(1) && tty.isatty(2)) {
if (process.stdout.getWindowSize) {
width = process.stdout.getWindowSize(1)[0];
height = process.stdout.getWindowSize(1)[1];
}
else if (tty.getWindowSize) {
width = tty.getWindowSize()[1];
height = tty.getWindowSize()[0];
}
else if (process.stdout.columns && process.stdout.rows) {
height = process.stdout.rows;
width = process.stdout.columns;
}
}
return {
exists: !(Boolean(process.env.CI) || !process.stdout.isTTY), // eslint-disable-line no-process-env
width: width,
height: height
};
}
};
module.exports = cliUtils;
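
A couple of the helpers above, used illustratively (internal require path is an assumption):

var cliUtils = require('newman/lib/reporters/cli/cli-utils'),
    wrap = cliUtils.wrapper();

console.log(cliUtils.padLeft(7, 3));        // "007"
console.log(cliUtils.padLeft(7, 3, ' '));   // "  7"
console.log(wrap('a long assertion message that should wrap to the detected terminal width', '  '));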

node_modules/newman/lib/reporters/cli/index.js generated vendored Normal file

@@ -0,0 +1,566 @@
var _ = require('lodash'),
sdk = require('postman-collection'),
colors = require('colors/safe'),
Table = require('cli-table3'),
format = require('util').format,
util = require('../../util'),
cliUtils = require('./cli-utils'),
print = require('../../print'),
pad = cliUtils.padLeft,
LF = '\n',
SPC = ' ',
DOT = '.',
E = '',
CACHED_TIMING_PHASE = '(cache)',
TIMING_TABLE_HEADERS = {
prepare: 'prepare',
wait: 'wait',
dns: 'dns-lookup',
tcp: 'tcp-handshake',
secureHandshake: 'ssl-handshake',
firstByte: 'transfer-start',
download: 'download',
process: 'process',
total: 'total'
},
BODY_CLIP_SIZE = 2048,
PostmanCLIReporter,
timestamp,
extractSNR;
// sets theme for colors for console logging
colors.setTheme({
log: 'grey',
info: 'cyan',
warn: 'yellow',
debug: 'blue',
error: 'red'
});
extractSNR = function (executions) {
var snr;
// eslint-disable-next-line lodash/collection-method-value
_.isArray(executions) && _.forEachRight(executions, function (execution) {
var nextReq = _.get(execution, 'result.return.nextRequest');
if (nextReq) {
snr = nextReq;
return false;
}
});
return snr;
};
/**
* CLI reporter
*
* @param {EventEmitter} emitter - An EventEmitter instance with event handler attachers to trigger reporting.
* @param {Object} reporterOptions - CLI reporter options object.
* @param {Boolean=} reporterOptions.silent - Boolean flag to turn off CLI reporting altogether, if set to true.
* @param {Boolean=} reporterOptions.noAssertions - Boolean flag to turn off assertion reporting, if set to true.
* @param {Boolean=} reporterOptions.noSuccessAssertions - Boolean flag, if true, turn off report successful assertions.
* @param {Boolean=} reporterOptions.noSummary - Boolean flag to turn off summary reporting altogether, if set to true.
* @param {Boolean=} reporterOptions.noFailures - Boolean flag to turn off failure reporting altogether, if set to true.
* @param {Boolean=} reporterOptions.noConsole - Boolean flag to turn off console logging, if set to true.
* @param {Boolean=} reporterOptions.noBanner - Boolean flag to turn off newman banner, if set to true.
* @param {Object} options - A set of generic collection run options.
* @returns {*}
*/
PostmanCLIReporter = function (emitter, reporterOptions, options) {
var currentGroup = options.collection,
inspect = cliUtils.inspector(options),
wrap = cliUtils.wrapper(),
symbols = cliUtils.symbols(options.disableUnicode);
// respect silent option to not report anything
if (reporterOptions.silent || options.silent) {
return; // we simply do not register anything!
}
// disable colors based on `noTTY`.
cliUtils.noTTY(options.color) && colors.disable();
// we register the `done` listener first so that in case user does not want to show results of collection run, we
// simply do not register the other events
emitter.on('done', function () {
// if there is no run summary, something is unexpectedly wrong, so we do not validate this
var run = this.summary.run;
// show the summary table (provided option does not say it is not to be shown)
if (!reporterOptions.noSummary) {
print(LF + PostmanCLIReporter.parseStatistics(run.stats, run.timings, run.transfers, options) + LF);
}
// show the failures table (provided option does not say it is not to be shown)
if (!reporterOptions.noFailures && run.failures && run.failures.length) {
print(LF + PostmanCLIReporter.parseFailures(run.failures) + LF);
}
});
emitter.on('start', function () {
var collectionIdentifier = currentGroup && (currentGroup.name || currentGroup.id);
if (!reporterOptions.noBanner) {
// print the newman banner
print('%s\n\n', colors.reset('newman'));
}
// print the collection name and newman info line
collectionIdentifier && print.lf('%s', colors.reset(collectionIdentifier));
});
emitter.on('beforeIteration', function (err, o) {
if (err || o.cursor.cycles <= 1) {
return; // do not print iteration banner if it is a single iteration run
}
// print the iteration info line
print.lf(LF + colors.gray.underline('Iteration %d/%d'), o.cursor.iteration + 1, o.cursor.cycles);
});
emitter.on('test', function (err, o) {
if (err) {
return;
}
var snr = extractSNR(o.executions);
if (snr) {
print.lf(LF + colors.gray('Attempting to set next request to', snr));
}
});
emitter.on('beforeItem', function (err, o) {
if (err) { return; }
var itemGroup = o.item.parent(),
root = !itemGroup || (itemGroup === options.collection);
// in case this item belongs to a separate folder, print that folder name
if (itemGroup && (currentGroup !== itemGroup)) {
!root && print('\n%s %s', symbols.folder, colors.reset(util.getFullName(itemGroup)));
// set the flag that keeps track of the currently running group
currentGroup = itemGroup;
}
// we print the item name. the symbol prefix denotes if the item is in root or under folder.
// @todo - when we do indentation, we would not need symbolic representation
o.item && print.lf('\n%s %s', (root ?
symbols.root : symbols.sub), colors.reset(o.item.name || E));
});
// print out the request name to be executed and start a spinner
emitter.on('beforeRequest', function (err, o) {
if (err || !o.request) { return; }
if (reporterOptions.showTimestamps) {
var currentTime = new Date();
timestamp = '[' + currentTime.toLocaleString() + ']';
print(' %s %s %s ',
colors.gray(timestamp),
colors.gray(o.request.method),
colors.gray(o.request.url.toString()));
}
else {
print(' %s %s ',
colors.gray(o.request.method),
colors.gray(o.request.url.toString()));
}
!options.disableUnicode && print().wait(colors.gray);
});
// output the response code, reason and time
emitter.on('request', function (err, o) {
if (err) {
print.lf(colors.red('[errored]'));
print.lf(colors.red(' %s'), err.message);
return;
}
if (!(o.request && o.response)) {
print.lf(colors.red('[errored]'));
print.lf(colors.red(' %s'), 'Internal error! Could not read response data.');
return;
}
// quickly print out basic non verbose response meta and exit
if (!options.verbose) {
print.lf(colors.gray('[%d %s, %s, %s]'), o.response.code, o.response.reason(),
util.filesize(o.response.size().total), util.prettyms(o.response.responseTime));
return;
}
// this point onwards the output is verbose. a tonne of variables are created here for
// keeping the output clean and readable
let req = o.request,
res = o.response,
// set values here with abundance of caution to avoid erroring out
reqSize = util.filesize(req.size().total),
resSize = util.filesize(res.size().total),
code = res.code,
reason = res.reason(),
mime = res.contentInfo() || {},
timings = _.last(_.get(o, 'history.execution.data')),
reqHeadersLen = _.get(req, 'headers.members.length'),
resHeadersLen = _.get(res, 'headers.members.length'),
resTime = util.prettyms(res.responseTime || 0),
reqText = (options.verbose && req.body) ? req.body.toString() : E,
reqTextLen = req.size().body || Buffer.byteLength(reqText),
resText = options.verbose ? res.text() : E,
resTextLen = res.size().body || Buffer.byteLength(resText),
reqBodyMode = _.get(req, 'body.mode', ''),
resSummary = [
`${mime.contentType}`,
`${mime.mimeType}`,
`${mime.mimeFormat}`,
`${mime.charset}`
].join(` ${colors.gray(symbols.star)} `);
print.lf(SPC); // also flushes out the circling progress icon
// for clean readability of code. this section compiles the cli string for one line of
// req-res combined summary. this looks somewhat like below:
// >> 200 OK ★ 979ms time ★ 270B↑ 793B↓ size ★ 7↑ 7↓ headers ★ 0 cookies
print.lf(SPC + SPC + [
`${code} ${reason}`,
`${resTime} ${colors.gray('time')}`,
`${reqSize}${colors.gray(symbols.up)} ${resSize}${colors.gray(symbols.down)} ${colors.gray('size')}`,
`${reqHeadersLen}${colors.gray(symbols.up)} ` +
`${resHeadersLen}${colors.gray(symbols.down)} ${colors.gray('headers')}`,
`${_.get(res, 'cookies.members.length')} ${colors.gray('cookies')}`
].join(` ${colors.gray(symbols.star)} `));
// print request body
if (reqTextLen) {
// truncate very large request (is 2048 large enough?)
if (reqTextLen > BODY_CLIP_SIZE) {
reqText = reqText.substr(0, BODY_CLIP_SIZE) +
colors.brightWhite(`\n(showing ${util.filesize(BODY_CLIP_SIZE)}/${util.filesize(reqTextLen)})`);
}
reqText = wrap(reqText, ` ${colors.white(symbols.console.middle)} `);
// eslint-disable-next-line max-len
print.buffer(` ${colors.white(symbols.console.top)} ${colors.white(symbols.up)} ${reqBodyMode} ${colors.gray(symbols.star)} ${util.filesize(reqTextLen)}\n`,
colors.white(` ${symbols.console.bottom}`))
// tweak the message to ensure that its surrounding is not brightly coloured.
// also ensure to remove any blank lines generated due to util.inspect
.nobuffer(colors.gray(reqText.replace(/\n\s*\n/g, LF) + LF));
print.lf(SPC); // visual tweak: flushes out the buffer of wrapping body above
}
// print response body
if (resTextLen) {
// truncate very large response (is 2048 large enough?)
if (resTextLen > BODY_CLIP_SIZE) {
resText = resText.substr(0, BODY_CLIP_SIZE) +
colors.brightWhite(`\n(showing ${util.filesize(BODY_CLIP_SIZE)}/${util.filesize(resTextLen)})`);
}
resText = wrap(resText, ` ${colors.white(symbols.console.middle)} `);
// eslint-disable-next-line max-len
print.buffer(` ${colors.white(symbols.console.top)} ${colors.white(symbols.down)} ${resSummary} ${colors.gray(symbols.star)} ${util.filesize(resTextLen)}\n`,
colors.white(` ${symbols.console.bottom}`))
// tweak the message to ensure that its surrounding is not brightly coloured.
// also ensure to remove any blank lines generated due to util.inspect
.nobuffer(colors.gray(resText.replace(/\n\s*\n/g, LF) + LF));
}
// print the line of response body meta one liner if there is no response body
// if there is one, we would already print it across the body braces above.
else {
// we need to do some newline related shenanigans here so that the output looks clean
// in the absence of the request body block
print.lf(` ${symbols.down} ${resSummary}`);
}
// print timing info of the request
timings = timings && timings.timings; // if there are redirects, get timings for the last request sent
if (timings) {
// adds nice units to all time data in the object
let timingPhases = util.beautifyTime(sdk.Response.timingPhases(timings)),
timingTable = new Table({
chars: _.defaults({ mid: '', middle: '' }, cliUtils.cliTableTemplate_Blank),
colAligns: _.fill(Array(_.size(timingPhases)), 'left'),
style: { 'padding-left': 2 }
});
timingPhases = _.transform(TIMING_TABLE_HEADERS, (result, header, key) => {
if (_.has(timingPhases, key)) {
result.headers.push(colors.white(header));
result.values.push(colors.log(timingPhases[key] || CACHED_TIMING_PHASE));
}
}, { headers: [], values: [] });
timingTable.push(timingPhases.headers); // add name of phases in the table
timingTable.push(timingPhases.values); // add time of phases in the table
print(LF + timingTable + LF + LF);
}
});
// Print script errors in real time
emitter.on('script', function (err, o) {
err && print.lf(colors.red.bold('%s⠄ %s in %s-script'), pad(this.summary.run.failures.length, 3, SPC), err.name,
o.event && o.event.listen || 'unknown');
});
!reporterOptions.noAssertions && emitter.on('assertion', function (err, o) {
var passed = !err;
// handle skipped test display
if (o.skipped && !reporterOptions.noSuccessAssertions) {
print.lf('%s %s', colors.cyan(' - '), colors.cyan('[skipped] ' + o.assertion));
return;
}
if (passed && reporterOptions.noSuccessAssertions) {
return;
}
// print each test assertions
if (reporterOptions.showTimestamps) {
timestamp = '[' + new Date().toLocaleTimeString() + ']';
print.lf(' %s%s %s', colors.gray(timestamp), passed ? colors.green(` ${symbols.ok} `) :
colors.red.bold(pad(this.summary.run.failures.length, 2, SPC) + symbols.dot), passed ?
colors.gray(o.assertion) : colors.red.bold(o.assertion));
}
else {
print.lf('%s %s', passed ? colors.green(` ${symbols.ok} `) :
colors.red.bold(pad(this.summary.run.failures.length, 3, SPC) + symbols.dot), passed ?
colors.gray(o.assertion) : colors.red.bold(o.assertion));
}
});
// show user console logs in a neatly formatted way (provided user has not disabled the same)
!reporterOptions.noConsole && emitter.on('console', function (err, o) {
if (err) { return; }
var color = colors[o.level] || colors.gray,
message;
// we first merge all messages to a string. while merging we run the values to util.inspect to colour code the
// messages based on data type
message = wrap(_.reduce(o.messages, function (log, message) { // wrap the whole message to the window size
return (log += (log ? colors.white(', ') : '') + inspect(message));
}, E), ` ${color(symbols.console.middle)} `); // add an indentation line at the beginning
// print the timestamp if the flag is present
if (reporterOptions.showTimestamps) {
print(LF + ' %s', colors.gray('[' + new Date().toLocaleTimeString() + ']' + LF));
}
print.buffer(color(` ${symbols.console.top}\n`), color(` ${symbols.console.bottom}\n`))
// tweak the message to ensure that its surrounding is not brightly coloured.
// also ensure to remove any blank lines generated due to util.inspect
.nobuffer(colors.gray(message.replace(/\n\s*\n/g, LF) + LF));
});
};
_.assignIn(PostmanCLIReporter, {
// @todo: change function signature to accept run object and options, thereby reducing parameters
/**
* A CLI reporter method to parse collection run statistics into a CLI table.
*
* @param {Object} stats - The cumulative collection run status object.
* @param {Object} stats.iterations - A set of values for total, pending, and failed iterations.
* @param {Number} stats.iterations.total - Total iterations in the current collection run.
* @param {Number} stats.iterations.pending - Pending iterations in the current collection run.
* @param {Number} stats.iterations.failed - Failed iterations in the current collection run.
* @param {Object} stats.requests - A set of values for total, pending, and failed requests.
* @param {Number} stats.requests.total - Total requests in the current collection run.
* @param {Number} stats.requests.pending - Pending requests in the current collection run.
* @param {Number} stats.requests.failed - Failed requests in the current collection run.
* @param {Object} stats.testScripts - A set of values for total, pending, and failed testScripts.
* @param {Number} stats.testScripts.total - Total testScripts in the current collection run.
* @param {Number} stats.testScripts.pending - Pending testScripts in the current collection run.
* @param {Number} stats.testScripts.failed - Failed testScripts in the current collection run.
* @param {Object} stats.prerequestScripts - A set of values for total, pending, and failed prerequestScripts.
* @param {Number} stats.prerequestScripts.total - Total prerequestScripts in the current collection run.
* @param {Number} stats.prerequestScripts.pending - Pending prerequestScripts in the current collection run.
* @param {Number} stats.prerequestScripts.failed - Failed prerequestScripts in the current collection run.
* @param {Object} stats.assertions - A set of values for total, pending, and failed assertions.
* @param {Number} stats.assertions.total - Total assertions in the current collection run.
* @param {Number} stats.assertions.pending - Pending assertions in the current collection run.
* @param {Number} stats.assertions.failed - Failed assertions in the current collection run.
* @param {Object} timings - A set of values for the timings of the current collection run.
* @param {Number} timings.completed - The end timestamp for the current collection run.
* @param {Number} timings.started - The start timestamp for the current collection run
* @param {String} timings.responseAverage - The average response time across all requests
* @param {String} timings.responseMin - The minimum response time across all requests
* @param {String} timings.responseMax - The maximum response time across all requests
* @param {String} timings.responseSd - Standard deviation of response time across all requests
* @param {String} timings.dnsAverage - The average DNS lookup time of the run
* @param {String} timings.dnsMin - The minimum DNS lookup time of the run
* @param {String} timings.dnsMax - The maximum DNS lookup time of the run
* @param {String} timings.dnsSd - Standard deviation of DNS lookup time of the run
* @param {String} timings.firstByteAverage - The average first byte time of the run
* @param {String} timings.firstByteMin - The minimum first byte time of the run
* @param {String} timings.firstByteMax - The maximum first byte time of the run
* @param {String} timings.firstByteSd - Standard deviation of first byte time of the run
* @param {Object} transfers - A set of details on the network usage for the current collection run.
* @param {String} transfers.responseTotal - The net extent of the data transfer achieved during the collection run.
* @param {Object} options - The set of generic collection run options.
* @returns {Table} - The constructed collection run statistics table.
*/
parseStatistics (stats, timings, transfers, options) {
var summaryTable;
// create the summary table
summaryTable = new Table({
chars: options.disableUnicode && cliUtils.cliTableTemplateFallback,
style: { head: [] },
head: [E, 'executed', ' failed'],
colAligns: ['right', 'right', 'right'],
colWidths: [25]
});
// add specific rows to show in summary
stats && _.forEach([{
source: 'iterations',
label: 'iterations'
}, {
source: 'requests',
label: 'requests'
}, {
source: 'testScripts',
label: 'test-scripts'
}, {
source: 'prerequestScripts',
label: 'prerequest-scripts'
}, {
source: 'assertions',
label: 'assertions'
}], function (row) {
var metric = stats[row.source],
label = row.label;
// colour the label based on the failure or pending count of the metric
label = metric.failed ? colors.red(label) : (metric.pending ? label : colors.green(label));
// push the statistics
summaryTable.push([
label,
metric.total,
(metric.failed ? colors.red(metric.failed) : metric.failed)
// @todo - add information of pending scripts
// (metric.failed ? colors.red(metric.failed) : metric.failed) +
// (metric.pending ? format(' (%d pending)', metric.pending) : E)
]);
});
// add the total execution time to summary
timings && summaryTable.push([{
colSpan: 3,
content: format('total run duration: %s', util.prettyms(timings.completed - timings.started)),
hAlign: 'left' // since main style was set to right
}]);
// add row to show total data received
transfers && summaryTable.push([{
colSpan: 3,
content: format('total data received: %s (approx)', util.filesize(transfers.responseTotal)),
hAlign: 'left'
}]);
// add rows containing average time of different request phases
timings && _.forEach({
response: 'average response time:',
dns: 'average DNS lookup time:',
firstByte: 'average first byte time:'
}, (value, key) => {
timings[`${key}Average`] && summaryTable.push([{
colSpan: 3,
content: format(`${value} %s [min: %s, max: %s, s.d.: %s]`,
util.prettyms(timings[`${key}Average`]),
util.prettyms(timings[`${key}Min`]),
util.prettyms(timings[`${key}Max`]),
util.prettyms(timings[`${key}Sd`])),
hAlign: 'left'
}]);
});
return summaryTable;
},
/**
* A CLI reporter method to parse collection run failure statistics into a CLI table.
*
* @param {Array} failures - An array of failure objects.
* @returns {Table} - The constructed CLI failure Table object.
*/
parseFailures (failures) {
var failureTable = new Table({
head: [{
hAlign: 'right',
content: colors.red.underline('#')
}, colors.red.underline('failure'),
colors.red.underline('detail')],
chars: cliUtils.cliTableTemplate_Blank,
wordWrap: true,
colAligns: ['right'],
colWidths: cliUtils.noTTY() ? [] : (function (size, indexOrder) {
var colWidths = [];
if (size.exists && size.width && (size.width > 20)) {
colWidths[0] = indexOrder + 3;
colWidths[1] = parseInt((size.width - colWidths[0]) * 0.2, 10);
colWidths[2] = parseInt(size.width - (colWidths[0] + colWidths[1] + 5), 10);
}
return colWidths;
}(cliUtils.dimension(), Number(failures.length.toString().length)))
});
_.forEach(failures, function (failure, index) {
var name = failure.error && failure.error.name || E,
message = failure.error && failure.error.test || E;
// augment name with iteration information
failure.cursor && (failure.cursor.cycles > 1) &&
(name += LF + colors.gray('iteration: ' + (failure.cursor.iteration + 1)));
// include the assertion error message in the failure details
failure.error && (message += LF + colors.gray(failure.error.message || E));
// augment the message with stack information
failure.at && (message += LF + colors.gray('at ' + failure.at));
// augment message with item information
failure.source &&
(message += format(colors.gray('\ninside "%s"'), util.getFullName(failure.source)));
failureTable.push([pad(Number(index + 1), Number(failures.length.toString().length)).toString() +
DOT, name, message]);
});
return failureTable;
}
});
// Mark the CLI reporter as dominant, so that no two dominant reporters are together
PostmanCLIReporter.prototype.dominant = true;
module.exports = PostmanCLIReporter;
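
A hedged sketch of driving some of the reporter options documented above through newman's programmatic API, assuming the keys map one-to-one to the reporterOptions read by this file (collection path is hypothetical):

const newman = require('newman');

newman.run({
    collection: './collection.json',
    reporters: ['cli'],
    reporter: {
        cli: {
            noBanner: true,            // skip the "newman" banner
            noSuccessAssertions: true, // print only failing assertions
            showTimestamps: true       // prefix requests and assertions with timestamps
        }
    }
}, (err) => err && console.error(err));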

node_modules/newman/lib/reporters/emojitrain.js generated vendored Normal file

@@ -0,0 +1,37 @@
var SmileyReporter;
/**
* Fills your collection run (read life) with a bunch of Emojis 😀.
*
* @param {Object} newman - The collection run object with event handling hooks to enable reporting.
* @param {Object} reporterOptions - A set of reporter specific run options.
* @param {Object} options - A set of generic collection run options.
* @returns {*}
*/
SmileyReporter = function (newman, reporterOptions, options) {
if (options.silent || reporterOptions.silent) {
return;
}
var fails = {},
noteFailure;
noteFailure = function (err, args) {
err && (fails[args.cursor.ref] = true);
};
newman.on('script', noteFailure);
newman.on('request', noteFailure);
newman.on('assertion', noteFailure);
newman.on('item', function (err, args) {
process.stdout.write((err || fails[args.cursor.ref]) ? '😢 ' : '😀 ');
});
newman.on('done', function (err) {
console.info((err || Object.keys(fails).length) ? ' 😭' : ' 😍');
});
};
SmileyReporter.prototype.dominant = true;
module.exports = SmileyReporter;

node_modules/newman/lib/reporters/json/index.js generated vendored Normal file

@@ -0,0 +1,22 @@
var _ = require('lodash');
/**
* Reporter that simply dumps the summary object to file (default: newman-run-report.json).
*
* @param {Object} newman - The collection run object, with event hooks for reporting run details.
* @param {Object} options - A set of collection run options.
* @param {String} options.export - The path to which the summary object must be written.
* @returns {*}
*/
module.exports = function (newman, options) {
newman.on('beforeDone', function (err, o) {
if (err) { return; }
newman.exports.push({
name: 'json-reporter',
default: 'newman-run-report.json',
path: options.export,
content: JSON.stringify(_.omit(o.summary, 'exports'), 0, 2)
});
});
};
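
The export path above typically arrives via --reporter-json-export on the CLI; a hedged programmatic equivalent (paths are examples):

const newman = require('newman');

newman.run({
    collection: './collection.json',
    reporters: ['json'],
    reporter: { json: { export: './newman/run-report.json' } }
}, (err) => err && console.error(err));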

node_modules/newman/lib/reporters/junit/index.js generated vendored Normal file

@@ -0,0 +1,164 @@
var _ = require('lodash'),
xml = require('xmlbuilder'),
util = require('../../util'),
JunitReporter;
/**
* A function that creates raw XML to be written to Newman JUnit reports.
*
* @param {Object} newman - The collection run object, with an event handler setter, used to enable event-wise reporting.
* @param {Object} reporterOptions - A set of JUnit reporter run options.
* @param {String=} reporterOptions.export - Optional custom path to create the XML report at.
* @returns {*}
*/
JunitReporter = function (newman, reporterOptions) {
newman.on('beforeDone', function () {
var report = _.get(newman, 'summary.run.executions'),
collection = _.get(newman, 'summary.collection'),
cache,
root,
testSuitesExecutionTime = 0,
executionTime = 0,
timestamp,
classname;
if (!report) {
return;
}
classname = _.upperFirst(_.camelCase(collection.name).replace(/\W/g, ''));
root = xml.create('testsuites', { version: '1.0', encoding: 'UTF-8' });
root.att('name', collection.name);
root.att('tests', _.get(newman, 'summary.run.stats.tests.total', 'unknown'));
cache = _.transform(report, function (accumulator, execution) {
accumulator[execution.item.id] = accumulator[execution.id] || [];
accumulator[execution.item.id].push(execution);
}, {});
timestamp = new Date(_.get(newman, 'summary.run.timings.started')).toISOString();
_.forEach(cache, function (executions, itemId) {
var suite = root.ele('testsuite'),
currentItem,
tests = {},
errors = 0,
failures = 0,
errorMessages;
collection.forEachItem(function (item) {
(item.id === itemId) && (currentItem = item);
});
if (!currentItem) { return; }
suite.att('name', util.getFullName(currentItem));
suite.att('id', currentItem.id);
suite.att('timestamp', timestamp);
_.forEach(executions, function (execution) {
var iteration = execution.cursor.iteration,
errored,
msg = `Iteration: ${iteration}\n`;
// Process errors
if (execution.requestError) {
++errors;
errored = true;
msg += ('RequestError: ' + (execution.requestError.stack) + '\n');
}
msg += '\n---\n';
_.forEach(['testScript', 'prerequestScript'], function (prop) {
_.forEach(execution[prop], function (err) {
if (err.error) {
++errors;
errored = true;
msg = (msg + prop + 'Error: ' + (err.error.stack || err.error.message));
msg += '\n---\n';
}
});
});
if (errored) {
errorMessages = _.isString(errorMessages) ? (errorMessages + msg) : msg;
}
// Process assertions
_.forEach(execution.assertions, function (assertion) {
var name = assertion.assertion,
err = assertion.error;
if (err) {
++failures;
(_.isArray(tests[name]) ? tests[name].push(err) : (tests[name] = [err]));
}
else {
tests[name] = [];
}
});
if (execution.assertions) {
suite.att('tests', execution.assertions.length);
}
else {
suite.att('tests', 0);
}
suite.att('failures', failures);
suite.att('errors', errors);
});
suite.att('time', _.mean(_.map(executions, function (execution) {
executionTime = _.get(execution, 'response.responseTime') / 1000 || 0;
testSuitesExecutionTime += executionTime;
return executionTime;
})).toFixed(3));
errorMessages && suite.ele('system-err').dat(errorMessages);
_.forOwn(tests, function (failures, name) {
var testcase = suite.ele('testcase'),
failure;
testcase.att('name', name);
testcase.att('time', executionTime.toFixed(3));
// Set the same classname for all the tests
testcase.att('classname', _.get(testcase.up(), 'attributes.name.value',
classname));
if (failures && failures.length) {
failure = testcase.ele('failure');
failure.att('type', 'AssertionFailure');
failure.dat('Failed ' + failures.length + ' times.');
failure.dat('Collection JSON ID: ' + collection.id + '.');
failure.dat('Collection name: ' + collection.name + '.');
failure.dat('Request name: ' + util.getFullName(currentItem) + '.');
failure.dat('Test description: ' + name + '.');
if (failures.length !== 0) {
failure.att('message', failures[0].message);
failure.dat('Error message: ' + failures[0].message + '.');
failure.dat('Stacktrace: ' + failures[0].stack + '.');
}
}
});
});
root.att('time', testSuitesExecutionTime.toFixed(3));
newman.exports.push({
name: 'junit-reporter',
default: 'newman-run-report.xml',
path: reporterOptions.export,
content: root.end({
pretty: true,
indent: ' ',
newline: '\n',
allowEmpty: false
})
});
});
};
module.exports = JunitReporter;
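
Similarly (hedged), the JUnit XML is usually requested via --reporter-junit-export, which ends up as reporterOptions.export above (paths are examples):

const newman = require('newman');

newman.run({
    collection: './collection.json',
    reporters: ['junit'],
    reporter: { junit: { export: './newman/junit-report.xml' } }
}, (err) => err && console.error(err));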

node_modules/newman/lib/reporters/progress.js generated vendored Normal file

@@ -0,0 +1,36 @@
var progress = require('cli-progress'),
ProgressReporter;
/**
* Little reporter that generates a collection progress status bar on CLI.
*
* @param {Object} newman - A run object with event handler specification methods.
* @param {Function} newman.on - An event setter method that provides hooks for reporting collection run progress.
* @param {Object} reporterOptions - A set of reporter specific run options.
* @param {Object} options - A set of generic collection run options.
* @returns {*}
*/
ProgressReporter = function (newman, reporterOptions, options) {
if (options.silent || reporterOptions.silent) {
return;
}
var bar = new progress.Bar({});
newman.on('start', function (err, o) {
if (err) { return; }
bar.start(o.cursor.length * o.cursor.cycles, 0);
});
newman.on('item', function () {
bar.increment();
});
newman.on('done', function () {
bar.stop();
});
};
ProgressReporter.prototype.dominant = true;
module.exports = ProgressReporter;

node_modules/newman/lib/run/export-file.js generated vendored Normal file

@@ -0,0 +1,136 @@
var fs = require('fs'),
nodePath = require('path'),
_ = require('lodash'),
async = require('async'),
mkdirp = require('mkdirp'),
// @todo: ES6: Change the sequence below to use object destructuring when Node v4 support is dropped
joinPath = nodePath.join,
parsePath = nodePath.parse,
resolvePath = nodePath.resolve,
/**
* The root path specifier
*
* @const
* @private
* @type {string}
*/
E = '',
/**
* Default timestamp separator.
*
* @const
* @private
* @type {string}
*/
TS_SEP = '-',
/**
* Writes the specified content to a file at the provided path.
*
* @param {Object} path - A set of path details for file writing.
* @param {String|Buffer} content - The content to be written to the file.
* @param {Object} options - A set of options for the current file write.
* @param {Function} cb - The callback invoked when the file writing operation has completed, with/without errors.
*/
writeFile = function (path, content, options, cb) {
fs.writeFile(path.unparsed, content, function (err) {
cb(_.set(err, 'help',
`error writing file "${path.unparsed}" for ${options.name || 'unknown-source'}`), path);
});
},
/**
* Generate a timestamp from date
*
* @param {Date=} date - The timestamp used to mark the exported file.
* @param {String=} separator - The optional string with which to separate different sections of the timestamp,
* defaults to TS_SEP
* @returns {String} - yyyy-mm-dd-HH-MM-SS-MS-0
*/
timestamp = function (date, separator) {
// use the iso string to ensure left padding and other stuff is taken care of
return (date || new Date()).toISOString().replace(/[^\d]+/g, _.isString(separator) ? separator : TS_SEP);
};
/**
* Module whose job is to export a file which is in an export format.
*
* @param {Object} options - The set of file export options.
* @param {String} options.path - The path to the exported file.
* @param {String|Object} options.content - The JSON / stringified content that is to be written to the file.
* @param {Function} done - The callback whose invocation marks the end of the file export routine.
* @returns {*}
*/
module.exports = function (options, done) {
// parse the path if one is available as string
var path = _.isString(options.path) && parsePath(resolvePath(options.path)),
content = _.isPlainObject(options.content) ? JSON.stringify(options.content, 0, 2) : (options.content || E);
// if a path was not provided by user, we need to prepare the default path. but create the default path only if one
// is provided.
if (!path && _.isString(options.default)) {
path = parsePath(options.default);
// delete the path and directory if one is detected when parsing defaults
path.root = E;
path.dir = 'newman';
// append timestamp
path.name = `${path.name}-${timestamp()}0`; // @todo make -0 become incremental if file name exists
path.base = path.name + path.ext;
}
// final check that path is valid
if (!(path && path.base)) {
return;
}
// now store the unparsed result back for quick re-use during writing and a single place for unparsing
path.unparsed = joinPath(path.dir, path.base);
// in case the path has a directory, ensure that the directory is available
if (path.dir) {
async.waterfall([
function (next) {
mkdirp(path.dir)
.then(() => {
return next();
})
.catch((err) => {
return next(_.set(err, 'help',
`error creating path for file "${path.unparsed}" for ${options.name || 'unknown-source'}`));
});
},
function (next) {
fs.stat(path.unparsed, function (err, stat) { // eslint-disable-line handle-callback-err
next(null, stat);
});
},
function (stat, next) {
var target;
// handle cases where the specified export path is a pre-existing directory
if (stat && stat.isDirectory()) {
target = parsePath(options.default);
// append timestamp
// @todo make -0 become incremental if file name exists
target.name += '-' + timestamp() + '0';
target.base = target.name + target.ext;
path.unparsed = joinPath(path.unparsed, target.base);
}
next(null, path);
},
function (path, next) {
writeFile(path, content, options, next);
}
], done);
}
else {
writeFile(path, content, options, done);
}
};
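
A hedged sketch of how a reporter's export entry flows through this module (values are examples; the internal require path is an assumption, and the file is written relative to the current directory):

var exportFile = require('newman/lib/run/export-file');

exportFile({
    name: 'json-reporter',
    default: 'newman-run-report.json',   // used because no explicit path is given
    content: { example: true }
}, function (err, path) {
    if (err) { return console.error(err.help || err); }
    console.log('written to', path.unparsed); // e.g. newman/newman-run-report-<timestamp>0.json
});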

node_modules/newman/lib/run/index.js generated vendored Normal file

@@ -0,0 +1,441 @@
var _ = require('lodash'),
asyncEach = require('async/each'),
sdk = require('postman-collection'),
runtime = require('postman-runtime'),
request = require('postman-request'),
EventEmitter = require('eventemitter3'),
SecureFS = require('./secure-fs'),
RunSummary = require('./summary'),
getOptions = require('./options'),
exportFile = require('./export-file'),
util = require('../util'),
/**
* This object describes the various events raised by Newman, and what each event argument contains.
* Error and cursor are present in all events.
*
* @type {Object}
*/
runtimeEvents = {
beforeIteration: [],
beforeItem: ['item'],
beforePrerequest: ['events', 'item'],
prerequest: ['executions', 'item'],
beforeRequest: ['request', 'item'],
request: ['response', 'request', 'item', 'cookies', 'history'],
beforeTest: ['events', 'item'],
test: ['executions', 'item'],
item: ['item'],
iteration: [],
beforeScript: ['script', 'event', 'item'],
script: ['execution', 'script', 'event', 'item']
},
/**
* load all the default reporters here. if you have new reporter, add it to this list
* we know someone, who does not like dynamic requires
*
* @type {Object}
*/
defaultReporters = {
cli: require('../reporters/cli'),
json: require('../reporters/json'),
junit: require('../reporters/junit'),
progress: require('../reporters/progress'),
emojitrain: require('../reporters/emojitrain')
},
/**
* The object of known reporters and their install instruction in case the reporter is not loaded.
* Pad message with two spaces since it's a follow-up message for a reporter warning.
*
* @private
* @type {Object}
*/
knownReporterErrorMessages = {
html: ' run `npm install newman-reporter-html`\n',
teamcity: ' run `npm install newman-reporter-teamcity`\n'
},
/**
* Multiple ids or names entrypoint lookup strategy.
*
* @private
* @type {String}
*/
MULTIENTRY_LOOKUP_STRATEGY = 'multipleIdOrName';
/**
* Runs the collection, with all the provided options, returning an EventEmitter.
*
* @param {Object} options - The set of wrapped options, passed by the CLI parser.
* @param {Collection|Object|String} options.collection - A JSON / Collection / String representing the collection.
* @param {Object|String} options.environment - An environment JSON / file path for the current collection run.
* @param {Object|String} options.globals - A globals JSON / file path for the current collection run.
* @param {String} options.workingDir - Path of working directory that contains files needed for the collection run.
* @param {String} options.insecureFileRead - If true, allow reading files outside of working directory.
* @param {Object|String} options.iterationData - An iterationData JSON / file path for the current collection run.
* @param {Object|String} options.reporters - A set of reporter names and their associated options for the current run.
* @param {Object|String} options.cookieJar - A tough-cookie cookieJar / file path for the current collection run.
* @param {String} options.exportGlobals - The relative path to export the globals file from the current run to.
* @param {String} options.exportEnvironment - The relative path to export the environment file from the current run to.
* @param {String} options.exportCollection - The relative path to export the collection from the current run to.
* @param {String} options.exportCookieJar - The relative path to export the cookie jar from the current run to.
* @param {Function} callback - The callback function invoked to mark the end of the collection run.
* @returns {EventEmitter} - An EventEmitter instance with done and error event attachments.
*/
module.exports = function (options, callback) {
// validate all options. note that the `options` parameter is optional and polymorphic
(!callback && _.isFunction(options)) && (
(callback = options),
(options = {})
);
!_.isFunction(callback) && (callback = _.noop);
var emitter = new EventEmitter(), // @todo: create a new inherited constructor
runner = new runtime.Runner(),
stopOnFailure,
entrypoint;
// get the configuration from various sources
getOptions(options, function (err, options) {
if (err) {
return callback(err);
}
// ensure that the collection option is present before starting a run
if (!_.isObject(options.collection)) {
return callback(new Error('expecting a collection to run'));
}
// use client certificate list to allow different ssl certificates for
// different URLs
var sslClientCertList = options.sslClientCertList || [],
// allow providing custom cookieJar
cookieJar = options.cookieJar || request.jar();
// if sslClientCert option is set, put it at the end of the list to
// match all URLs that didn't match in the list
if (options.sslClientCert) {
sslClientCertList.push({
name: 'client-cert',
matches: [sdk.UrlMatchPattern.MATCH_ALL_URLS],
key: { src: options.sslClientKey },
cert: { src: options.sslClientCert },
passphrase: options.sslClientPassphrase
});
}
// iterates over the bail array and sets each item as an obj key with a value of boolean true
// [item1, item2] => {item1: true, item2: true}
if (_.isArray(options.bail)) {
options.bail = _.transform(options.bail, function (result, value) {
result[value] = true;
}, {});
}
// sets entrypoint to execute if options.folder is specified.
if (options.folder) {
entrypoint = { execute: options.folder };
// uses `multipleIdOrName` lookupStrategy in case of multiple folders.
_.isArray(entrypoint.execute) && (entrypoint.lookupStrategy = MULTIENTRY_LOOKUP_STRATEGY);
}
// sets stopOnFailure to true in case bail is used without any modifiers or with failure
// --bail => stopOnFailure = true
// --bail failure => stopOnFailure = true
(typeof options.bail !== 'undefined' &&
(options.bail === true || (_.isObject(options.bail) && options.bail.failure))) ?
stopOnFailure = true : stopOnFailure = false;
// store summary object and other relevant information inside the emitter
emitter.summary = new RunSummary(emitter, options);
// to store the exported content from reporters
emitter.exports = [];
// expose the runner object for reporter and programmatic use
emitter.runner = runner;
// now start the run!
runner.run(options.collection, {
stopOnFailure: stopOnFailure, // LOL, you just got trolled ¯\_(ツ)_/¯
abortOnFailure: options.abortOnFailure, // used in integration tests, to be considered for a future release
abortOnError: _.get(options, 'bail.folder'),
iterationCount: options.iterationCount,
environment: options.environment,
globals: options.globals,
entrypoint: entrypoint,
data: options.iterationData,
delay: {
item: options.delayRequest
},
timeout: {
global: options.timeout || 0,
request: options.timeoutRequest || 0,
script: options.timeoutScript || 0
},
fileResolver: new SecureFS(options.workingDir, options.insecureFileRead),
requester: {
useWhatWGUrlParser: true,
cookieJar: cookieJar,
followRedirects: _.has(options, 'ignoreRedirects') ? !options.ignoreRedirects : undefined,
strictSSL: _.has(options, 'insecure') ? !options.insecure : undefined,
timings: Boolean(options.verbose),
extendedRootCA: options.sslExtraCaCerts,
agents: _.isObject(options.requestAgents) ? options.requestAgents : undefined
},
certificates: sslClientCertList.length && new sdk.CertificateList({}, sslClientCertList)
}, function (err, run) {
if (err) { return callback(err); }
var callbacks = {},
// ensure that the reporter option type polymorphism is handled
reporters = _.isString(options.reporters) ? [options.reporters] : options.reporters,
// keep a track of start assertion indices of legacy assertions
legacyAssertionIndices = {};
// emit events for all the callbacks triggered by the runtime
_.forEach(runtimeEvents, function (definition, eventName) {
// intercept each runtime.* callback and expose a global object based event
callbacks[eventName] = function (err, cursor) {
var args = arguments,
obj = { cursor };
// convert the arguments into an object by taking the key name reference from the definition
// object
_.forEach(definition, function (key, index) {
obj[key] = args[index + 2]; // first two are err, cursor
});
args = [eventName, err, obj];
emitter.emit.apply(emitter, args); // eslint-disable-line prefer-spread
};
});
// add non generic callback handling
_.assignIn(callbacks, {
/**
* Emits event for start of the run. It injects/exposes additional objects useful for
* programmatic usage and reporters
*
* @param {?Error} err - An Error instance / null object.
* @param {Object} cursor - The run cursor instance.
* @returns {*}
*/
start (err, cursor) {
emitter.emit('start', err, {
cursor,
run
});
},
/**
* Bubbles up console messages.
*
* @param {Object} cursor - The run cursor instance.
* @param {String} level - The level of console logging [error, silent, etc].
* @returns {*}
*/
console (cursor, level) {
emitter.emit('console', null, {
cursor: cursor,
level: level,
messages: _.slice(arguments, 2)
});
},
/**
* The exception handler for the current run instance.
*
* @todo Fix bug of arg order in runtime.
* @param {Object} cursor - The run cursor.
* @param {?Error} err - An Error instance / null object.
* @returns {*}
*/
exception (cursor, err) {
emitter.emit('exception', null, {
cursor: cursor,
error: err
});
},
assertion (cursor, assertions) {
_.forEach(assertions, function (assertion) {
var errorName = _.get(assertion, 'error.name', 'AssertionError');
!assertion && (assertion = {});
// store the legacy assertion index
assertion.index && (legacyAssertionIndices[cursor.ref] = assertion.index);
emitter.emit('assertion', (assertion.passed ? null : {
name: errorName,
index: assertion.index,
test: assertion.name,
message: _.get(assertion, 'error.message', assertion.name || ''),
stack: errorName + ': ' + _.get(assertion, 'error.message', '') + '\n' +
' at Object.eval sandbox-script.js:' + (assertion.index + 1) + ':' +
((cursor && cursor.position || 0) + 1) + ')'
}), {
cursor: cursor,
assertion: assertion.name,
skipped: assertion.skipped,
error: assertion.error,
item: run.resolveCursor(cursor)
});
});
},
/**
* Custom callback to override the `done` event to fire the end callback.
*
* @todo Do some memory cleanup here?
* @param {?Error} err - An error instance / null passed from the done event handler.
* @param {Object} cursor - The run instance cursor.
* @returns {*}
*/
done (err, cursor) {
// in case runtime faced an error during the run, we do not process any other event and emit `done` directly.
// we do it this way since an error in the `done` callback would have skipped any intermediate
// events or callbacks anyway
if (err) {
emitter.emit('done', err, emitter.summary);
callback(err, emitter.summary);
return;
}
// we emit a `beforeDone` event so that reporters and other such addons can do computation before
// the run is marked as done
emitter.emit('beforeDone', null, {
cursor: cursor,
summary: emitter.summary
});
_.forEach(['environment', 'globals', 'collection', 'cookie-jar'], function (item) {
// fetch the path name from options if one is provided
var path = _.get(options, _.camelCase(`export-${item}`));
// if the options have an export path, then add the item to export queue
path && emitter.exports.push({
name: item,
default: `newman-${item}.json`,
path: path,
content: item === 'cookie-jar' ?
cookieJar.toJSON() :
_(emitter.summary[item].toJSON())
.defaults({
name: item
})
.merge({
_postman_variable_scope: item,
_postman_exported_at: (new Date()).toISOString(),
_postman_exported_using: util.userAgent
})
.value()
});
});
asyncEach(emitter.exports, exportFile, function (err) {
// we now trigger actual done event which we had overridden
emitter.emit('done', err, emitter.summary);
callback(err, emitter.summary);
});
}
});
emitter.on('script', function (err, o) {
// bubble special script name based events
o && o.event && emitter.emit(o.event.listen + 'Script', err, o);
});
emitter.on('beforeScript', function (err, o) {
// bubble special script name based events
o && o.event && emitter.emit(_.camelCase('before-' + o.event.listen + 'Script'), err, o);
});
// initialise all the reporters
!emitter.reporters && (emitter.reporters = {});
_.isArray(reporters) && _.forEach(reporters, function (reporterName) {
// disallow duplicate reporter initialisation
if (_.has(emitter.reporters, reporterName)) { return; }
var Reporter;
try {
// check if the reporter is an external reporter
Reporter = require((function (name) { // ensure scoped packages are loaded
var prefix = '',
scope = (name.charAt(0) === '@') && name.substr(0, name.indexOf('/') + 1);
if (scope) {
prefix = scope;
name = name.substr(scope.length);
}
return prefix + 'newman-reporter-' + name;
}(reporterName)));
}
// @todo - maybe have a debug mode and log error there
catch (error) {
if (!defaultReporters[reporterName]) {
// @todo: route this via print module to respect silent flags
console.warn(`newman: could not find "${reporterName}" reporter`);
console.warn(' ensure that the reporter is installed in the same directory as newman');
// print install instruction in case a known reporter is missing
if (knownReporterErrorMessages[reporterName]) {
console.warn(knownReporterErrorMessages[reporterName]);
}
else {
console.warn(' please install reporter using npm\n');
}
}
}
// load the local reporter if it's not an external reporter
!Reporter && (Reporter = defaultReporters[reporterName]);
try {
// we could have checked _.isFunction(Reporter) here, but we do not, so that the nature of the
// reporter error can be bubbled up
Reporter && (emitter.reporters[reporterName] = new Reporter(emitter,
_.get(options, ['reporter', reporterName], {}), options));
}
catch (error) {
// if the reporter errored out during initialisation, we should not stop the run; simply log
// the error stack trace for debugging
console.warn(`newman: could not load "${reporterName}" reporter`);
if (!defaultReporters[reporterName]) {
// @todo: route this via print module to respect silent flags
console.warn(` this seems to be a problem in the "${reporterName}" reporter.\n`);
}
console.warn(error);
}
});
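// a quick illustration of the name mapping performed above (both package names here are
// assumptions, not reporters bundled with newman): `--reporters html` makes the loader require
// 'newman-reporter-html', while a scoped name such as `@myorg/teams` resolves to
// '@myorg/newman-reporter-teams'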
// raise a warning when more than one dominant reporter is used
(function (reporters) {
// find all reporters whose `dominant` key is set to true
var conflicts = _.keys(_.transform(reporters, function (conflicts, reporter, name) {
reporter.dominant && (conflicts[name] = true);
}));
(conflicts.length > 1) && // if more than one dominant, raise a warning
console.warn(`newman: ${conflicts.join(', ')} reporters might not work well together.`);
}(emitter.reporters));
// we ensure that everything is async to comply with the event paradigm, and then start the run
setImmediate(function () {
run.start(callbacks);
});
});
});
return emitter;
};
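A minimal consumer sketch (not part of the vendored file) showing how the emitter returned above is typically used; the collection path and reporter name are assumptions:
const newman = require('newman');
newman.run({ collection: './collection.json', reporters: 'cli' }, function (err, summary) {
    // the same callback(err, emitter.summary) invoked from the `done` handler above
    if (err) { throw err; }
    console.log('assertions:', summary.run.stats.assertions);
})
    .on('assertion', function (err, o) {
        // `err` is null for passing assertions, otherwise the failure object built above
        err && console.warn('failed:', o.assertion, '-', err.message);
    })
    .on('done', function (err, summary) {
        console.log('run finished with', summary.run.failures.length, 'failure(s)');
    });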

396
node_modules/newman/lib/run/options.js generated vendored Normal file
View File

@@ -0,0 +1,396 @@
var _ = require('lodash'),
fs = require('fs'),
async = require('async'),
Collection = require('postman-collection').Collection,
VariableScope = require('postman-collection').VariableScope,
CookieJar = require('tough-cookie').CookieJar,
transformer = require('postman-collection-transformer'),
liquidJSON = require('liquid-json'),
parseCsv = require('csv-parse'),
util = require('../util'),
config = require('../config'),
/**
* The message displayed when the specified collection file can't be loaded.
*
* @const
* @type {String}
*/
COLLECTION_LOAD_ERROR_MESSAGE = 'collection could not be loaded',
/**
* The message displayed when the specified iteration data file can't be loaded.
*
* @const
* @type {String}
*/
ITERATION_DATA_LOAD_ERROR_MESSAGE = 'iteration data could not be loaded',
/**
* The message displayed when the specified environment or globals file can't be loaded.
*
* @const
* @type {String}
*/
LOAD_ERROR_MESSAGE = 'could not load ',
/**
* The set of postman collection transformer options, to convert collection v1 to collection v2.
*
* @const
* @type {Object}
*/
COLLECTION_TRANSFORMER_OPTION = { inputVersion: '1.0.0', outputVersion: '2.1.0' },
/**
* Accepts an object and extracts the inner property that is supposed to contain the required data.
* In the case of variables, it also extracts them into plain JS objects.
*
* @param {Object} source - The source wrapper object that may or may not contain inner wrapped properties.
* @param {String} type - "environment" or "globals", etc.
* @returns {Object} - The object representation of the current extracted property.
*/
extractModel = function (source, type) {
source = source[type] || source; // extract object that holds variable. these usually come from cloud API
if (!_.isObject(source)) {
return undefined;
}
// ensure we un-box the JSON if it comes from cloud-api or similar sources
!source.values && _.isObject(source[type]) && (source = source[type]);
// we ensure that environment passed as array is converted to plain object. runtime does this too, but we do it
// here for consistency of options passed to reporters
return source;
},
/**
* Loads the given data of type from a specified external location
*
* @param {String} type - The type of data to load.
* @param {String} location - The location to load from (file path or URL).
* @param {Object} options - The set of wrapped options.
* @param {function} cb - The callback function whose invocation marks the end of the external load routine.
* @returns {*}
*/
externalLoader = function (type, location, options, cb) {
return _.isString(location) ? util.fetchJson(type, location, options, function (err, data) {
if (err) {
return cb(err);
}
return cb(null, extractModel(data, type));
}) : cb(null, extractModel(location, type));
},
/**
* A helper method to process a collection and convert it to a V2 equivalent if necessary, and return it.
*
* @todo Drop support for the v1 collection format in Newman v5.
* Reference: https://github.com/postmanlabs/newman/pull/1660
*
* @param {Object} collection The input collection, specified as a JSON object.
* @param {Function} callback A handler function that consumes an error object and the processed collection.
* @returns {*}
*/
processCollection = function (collection, callback) {
if (util.isV1Collection(collection)) {
// @todo: route this via print module to respect silent flags
console.warn('newman: Newman v4 deprecates support for the v1 collection format');
console.warn(' Use the Postman Native app to export collections in the v2 format\n');
return transformer.convert(collection, COLLECTION_TRANSFORMER_OPTION, callback);
}
callback(null, collection);
},
/**
* Helper function that manages the load of environments and globals
*
* @private
* @param {String} type - The type of resource to load: collection, environment, etc.
* @param {String|Object} value - The value derived from the CLI or run command.
* @param {Object} options - The set of wrapped options.
* @param {Function} callback - The function invoked when the scope has been loaded.
*/
loadScopes = function (type, value, options, callback) {
var done = function (err, scope) {
if (err) { return callback(new Error(LOAD_ERROR_MESSAGE + `${type}\n ${err.message || err}`)); }
if (!_.isObject(scope)) {
return done(new Error(LOAD_ERROR_MESSAGE + type));
}
callback(null, new VariableScope(VariableScope.isVariableScope(scope) ? scope.toJSON() : scope));
};
if (_.isObject(value)) {
return done(null, value);
}
externalLoader(type, value, options, done);
},
/**
* Custom method to auto parse CSV values
*
* @private
* @param {String} value - CSV field value
* @param {Object} context - Context of field value
* @param {Boolean} context.quoting - A boolean indicating if the field was surrounded by quotes.
* @returns {String|Number|Date}
*/
csvAutoParse = function (value, context) {
if (context.quoting) {
// avoid parsing quoted values
return value;
}
if (util.isInt(value)) {
return parseInt(value, 10);
}
if (util.isFloat(value)) {
return parseFloat(value);
}
return value;
},
/**
* Custom configuration loaders for the required configuration keys.
*
* @type {Object}
*/
configLoaders = {
/**
* The collection file load helper for the current run.
*
* @param {Object|String} value - The collection, specified as a JSON object, or the path to its file.
* @param {Object} options - The set of wrapped options.
* @param {Function} callback - The callback function invoked to mark the end of the collection load routine.
* @returns {*}
*/
collection: function (value, options, callback) {
/**
* The post collection load handler.
*
* @param {?Error} err - An Error instance / null, passed from the collection loader.
* @param {Object} collection - The collection / raw JSON object, passed from the collection loader.
* @returns {*}
*/
var done = function (err, collection) {
if (err) {
return callback(err);
}
// ensure that the collection option is present before starting a run
if (!_.isObject(collection)) {
return callback(new Error(COLLECTION_LOAD_ERROR_MESSAGE));
}
// ensure that the collection reference is an SDK instance
// @todo - should this be handled by config loaders?
collection = new Collection(Collection.isCollection(collection) ?
// if the option contains an instance of a collection, we simply clone it for future use
// create a collection in case it is not one. user can send v2 JSON as a source and that will be
// converted to a collection
collection.toJSON() : collection);
callback(null, collection);
};
// if the collection has been specified as an object, convert to V2 if necessary and return the result
if (_.isObject(value)) {
return processCollection(value, done);
}
externalLoader('collection', value, options, function (err, data) {
if (err) {
return done(new Error(COLLECTION_LOAD_ERROR_MESSAGE +
(err.help ? `\n ${err.help}` : '') +
`\n ${err.message || err}`));
}
if (!_.isObject(data)) {
return done(new Error(COLLECTION_LOAD_ERROR_MESSAGE));
}
return processCollection(data, done);
});
},
/**
* The environment configuration object, loaded for the current collection run.
*
* @type {Object}
*/
environment: loadScopes.bind(this, 'environment'),
/**
* The object of globals, loaded for the collection run.
*
* @type {Object}
*/
globals: loadScopes.bind(this, 'globals'),
/**
* Helper function to sanitize folder option.
*
* @param {String[]|String} value - The list of folders to execute
* @param {Object} options - The set of wrapped options.
* @param {Function} callback - The callback function invoked to mark the end of the folder load routine.
* @returns {*}
*/
folder: function (value, options, callback) {
if (!value.length) {
return callback(); // avoids empty string or array
}
if (Array.isArray(value) && value.length === 1) {
return callback(null, value[0]); // avoids using multipleIdOrName strategy for a single item array
}
callback(null, value);
},
/**
* The iterationData loader module, with support for JSON or CSV data files.
*
* @param {String|Object[]} location - The path to the iteration data file for the current collection run, or
* the array of iteration data objects.
* @param {Object} options - The set of wrapped options.
* @param {Function} callback - The function invoked to indicate the end of the iteration data loading routine.
* @returns {*}
*/
iterationData: function (location, options, callback) {
if (_.isArray(location)) { return callback(null, location); }
util.fetch(location, function (err, data) {
if (err) {
return callback(new Error(ITERATION_DATA_LOAD_ERROR_MESSAGE + `\n ${err.message || err}`));
}
// Try loading as a JSON, fall-back to CSV.
async.waterfall([
(cb) => {
try {
return cb(null, liquidJSON.parse(data.trim()));
}
catch (e) {
return cb(null, undefined); // e masked to avoid displaying JSON parse errors for CSV files
}
},
(json, cb) => {
if (json) {
return cb(null, json);
}
// Wasn't JSON
parseCsv(data, {
columns: true, // infer the column names from the first row
escape: '"', // escape character
cast: csvAutoParse, // function to cast values of individual fields
trim: true, // ignore whitespace immediately around the delimiter
relax: true, // allow using quotes without escaping inside unquoted strings
relax_column_count: true, // ignore inconsistent columns count
bom: true // strip the byte order mark (BOM) from the input string
}, cb);
}
], (err, parsed) => {
if (err) {
return callback(new Error(ITERATION_DATA_LOAD_ERROR_MESSAGE + `\n ${err.message || err}`));
}
callback(null, parsed);
});
});
},
sslClientCertList: function (location, options, callback) {
if (Array.isArray(location)) {
return callback(null, location);
}
if (typeof location !== 'string') {
return callback(new Error('path for ssl client certificates list file must be a string'));
}
fs.readFile(location, function (err, value) {
if (err) {
return callback(new Error(`unable to read the ssl client certificates file "${location}"`));
}
try {
value = liquidJSON.parse(value.toString(util.detectEncoding(value)).trim());
}
catch (e) {
return callback(new Error(`the file at ${location} does not contain valid JSON data.`));
}
// ensure that `sslClientCertList` is an array
if (!Array.isArray(value)) {
return callback(new Error('expected ssl client certificates list to be an array.'));
}
return callback(null, value);
});
},
cookieJar: function (location, options, callback) {
if (_.isObject(location) && location.constructor.name === 'CookieJar') {
return callback(null, location);
}
if (typeof location !== 'string') {
return callback(new Error('cookieJar must be a path to a JSON file or a CookieJar instance'));
}
fs.readFile(location, function (err, value) {
if (err) {
return callback(new Error(`unable to read the cookie jar file "${location}"`));
}
try {
value = CookieJar.fromJSON(value.toString());
}
catch (e) {
return callback(new Error(`the file at ${location} does not contain valid JSON data.`));
}
return callback(null, value);
});
}
};
/**
* The helper function to load all file based information for the current collection run.
*
* @param {Object} options - The set of generic collection run options.
* @param {Function} callback - The function called to mark the completion of the configuration load routine.
* @returns {*}
*/
module.exports = function (options, callback) {
// set newman version used for collection run
options.newmanVersion = util.version;
// set working directory if not provided
options.workingDir = options.workingDir || process.cwd();
// allow insecure file read by default
options.insecureFileRead = Boolean(_.get(options, 'insecureFileRead', true));
config.get(options, { loaders: configLoaders, command: 'run' }, function (err, result) {
if (err) { return callback(err); }
!_.isEmpty(options.globalVar) && _.forEach(options.globalVar, function (variable) {
variable && (result.globals.set(variable.key, variable.value));
});
!_.isEmpty(options.envVar) && _.forEach(options.envVar, function (variable) {
variable && (result.environment.set(variable.key, variable.value));
});
callback(null, result);
});
};
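A hedged sketch of calling this options loader directly (newman's run() normally does this internally); the require path and the file names are assumptions:
var getRunOptions = require('newman/lib/run/options');
getRunOptions({
    collection: './collection.json', // path, URL, UID or JSON object, resolved by the `collection` loader above
    environment: './environment.json', // turned into a VariableScope by loadScopes
    iterationData: './data.csv', // parsed as JSON first, then as CSV (see the iterationData loader)
    envVar: [{ key: 'baseUrl', value: 'http://localhost:3000' }]
}, function (err, options) {
    if (err) { return console.error(err.message); }
    console.log(options.collection.name, options.iterationData.length);
});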

187
node_modules/newman/lib/run/secure-fs.js generated vendored Normal file
View File

@@ -0,0 +1,187 @@
const fs = require('fs'),
_ = require('lodash'),
path = require('path'),
util = require('util'),
Readable = require('stream').Readable,
PPERM_ERR = 'PPERM: insecure file access outside working directory',
FUNCTION = 'function',
DEPRECATED_SYNC_WRITE_STREAM = 'SyncWriteStream',
EXPERIMENTAL_PROMISE = 'promises',
// Use a simple character check instead of a regex to prevent regex attacks
/*
* Windows root directories can be of the following forms
*
* | File System | Actual | Modified |
* |-------------|------------------|-------------------|
* | LFS (Local) | C:\Program | /C:/Program |
* | UNC | \\Server\Program | ///Server/Program |
*/
isWindowsRoot = function (path) {
const drive = path.charAt(1);
return ((path.charAt(0) === '/') &&
((drive >= 'A' && drive <= 'Z') || (drive >= 'a' && drive <= 'z')) &&
(path.charAt(2) === ':')) ||
path.slice(0, 3) === '///'; // Modified UNC path
},
stripTrailingSep = function (thePath) {
if (thePath[thePath.length - 1] === path.sep) {
return thePath.slice(0, -1);
}
return thePath;
},
pathIsInside = function (thePath, potentialParent) {
// For inside-directory checking, we want to allow trailing slashes, so normalize.
thePath = stripTrailingSep(thePath);
potentialParent = stripTrailingSep(potentialParent);
// Node treats only Windows as case-insensitive in its path module; we follow those conventions.
if (global.process.platform === 'win32') {
thePath = thePath.toLowerCase();
potentialParent = potentialParent.toLowerCase();
}
return thePath.lastIndexOf(potentialParent, 0) === 0 &&
(
thePath[potentialParent.length] === path.sep ||
thePath[potentialParent.length] === undefined
);
};
/**
* Secure file resolver wrapper over fs. It only allows access to files inside the working directory unless specified otherwise.
*
* @param {String} workingDir - Path of the working directory
* @param {Boolean} [insecureFileRead=false] - If true, allow reading files outside the working directory
* @param {String[]} [fileWhitelist=[]] - List of allowed files outside of the working directory
*/
function SecureFS (workingDir, insecureFileRead = false, fileWhitelist = []) {
this._fs = fs;
this._path = path;
this.constants = this._fs.constants;
this.workingDir = workingDir;
this.insecureFileRead = insecureFileRead;
this.fileWhitelist = fileWhitelist;
this.isWindows = global.process.platform === 'win32';
}
/**
* Private method to resolve the path based on the working directory
*
* @param {String} relOrAbsPath - Relative or absolute path to resolve
* @param {Array} whiteList - A list of absolute paths to whitelist
*
* @returns {String} The resolved path
*/
SecureFS.prototype._resolve = function (relOrAbsPath, whiteList) {
// Special handling for Windows absolute paths to work cross-platform
this.isWindows && isWindowsRoot(relOrAbsPath) && (relOrAbsPath = relOrAbsPath.substring(1));
// Resolve the path from the working directory. The file should always be resolved so that
// cross-OS variations are mitigated
let resolvedPath = this._path.resolve(this.workingDir, relOrAbsPath);
// Check file is within working directory
if (!this.insecureFileRead && // insecureFile read disabled
!pathIsInside(resolvedPath, this.workingDir) && // File not inside working directory
!_.includes(whiteList, resolvedPath)) { // File not in whitelist
// Exit
return undefined;
}
return resolvedPath;
};
/**
* Asynchronous path resolver function
*
* @param {String} relOrAbsPath - Relative or absolute path to resolve
* @param {Array} [whiteList] - An optional list of additional absolute paths to whitelist
* @param {Function} callback - The callback invoked with an error or the resolved path
*/
SecureFS.prototype.resolvePath = function (relOrAbsPath, whiteList, callback) {
if (!callback && typeof whiteList === FUNCTION) {
callback = whiteList;
whiteList = [];
}
let resolvedPath = this._resolve(relOrAbsPath, _.concat(this.fileWhitelist, whiteList));
if (!resolvedPath) {
return callback(new Error(PPERM_ERR));
}
return callback(null, resolvedPath);
};
/**
* Synchronous path resolver function
*
* @param {String} relOrAbsPath - Relative or absolute path to resolve
* @param {Array} [whiteList] - A optional list of additional absolute path to whitelist
*
* @returns {String} The resolved path
*/
SecureFS.prototype.resolvePathSync = function (relOrAbsPath, whiteList) {
// Resolve the path from the working directory
const resolvedPath = this._resolve(relOrAbsPath, _.concat(this.fileWhitelist, whiteList));
if (!resolvedPath) {
throw new Error(PPERM_ERR);
}
return resolvedPath;
};
// Attach all functions in fs to SecureFS
Object.getOwnPropertyNames(fs).map((prop) => {
// Bail out early to prevent the fs module from logging warnings for deprecated and experimental methods
if (prop === DEPRECATED_SYNC_WRITE_STREAM || prop === EXPERIMENTAL_PROMISE || typeof fs[prop] !== FUNCTION) {
return;
}
SecureFS.prototype[prop] = fs[prop];
});
// Override the required functions
SecureFS.prototype.stat = function (path, callback) {
this.resolvePath(path, (err, resolvedPath) => {
if (err) {
return callback(err);
}
return this._fs.stat(resolvedPath, callback);
});
};
SecureFS.prototype.createReadStream = function (path, options) {
try {
return this._fs.createReadStream(this.resolvePathSync(path), options);
}
catch (err) {
// Create a fake read stream that emits an error and then destroys itself
const ErrorReadStream = function () {
// Replicate the fs module's behavior of disabling emitClose on destroy
Readable.call(this, { emitClose: false });
// Emit the error event with the insecure file access error
this.emit('error', new Error(PPERM_ERR));
// If options exist and autoClose is disabled, don't destroy the stream
(options && !options.autoClose) || this.destroy();
};
util.inherits(ErrorReadStream, Readable);
return new ErrorReadStream();
}
};
module.exports = SecureFS;
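A small usage sketch of the resolution behaviour above (the directory and file names are illustrative assumptions):
const SecureFS = require('newman/lib/run/secure-fs');
// insecureFileRead defaults to false here, so only files under the working directory are allowed
const secureFs = new SecureFS('/home/user/project');
secureFs.resolvePath('data/users.csv', function (err, resolved) {
    // resolves to /home/user/project/data/users.csv, which is inside the working directory
    console.log(err, resolved);
});
secureFs.resolvePath('../secrets.txt', function (err) {
    // outside the working directory and not whitelisted, so `err` carries the PPERM error above
    console.log(err && err.message);
});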

408
node_modules/newman/lib/run/summary.js generated vendored Normal file
View File

@@ -0,0 +1,408 @@
var _ = require('lodash'),
sdk = require('postman-collection'),
SerialiseError = require('serialised-error'),
RunSummary;
/**
* Creates and returns a RunSummary instance for the current collection run.
*
* @constructor
* @param {EventEmitter} emitter - An EventEmitter instance with event handler attachments to add run information to.
* @param {Object} options - A set of run summary creation options.
*/
RunSummary = function RunSummary (emitter, options) {
// keep a copy of this instance since we need to refer to it from various event handlers
var summary = this;
// and store the trackers and failures in the summary object itself
_.assign(summary, /** @lends RunSummary.prototype */ {
/**
* The collection that is being executed.
*
* @type {Collection}
*/
collection: _.get(options, 'collection'),
/**
* The environment that is being used during the run
*
* @type {VariableScope}
*
*/
environment: _.get(options, 'environment'),
/**
* Global variables being used during the run
*
* @type {VariableScope}
*/
globals: _.get(options, 'globals'),
/**
* Holds information related to the run.
*/
run: {
/**
* Holds the statistics of the run. Each property in it is the item being tracked and has three numeric
* properties - total, failed, pending
*
* @type {Object.<Object>}
*/
stats: {
iterations: {},
items: {},
scripts: {},
prerequests: {},
requests: {},
tests: {},
assertions: {},
testScripts: {},
prerequestScripts: {}
},
/**
* Stores all generic timing information
*
* @type {Object}
*/
timings: {
/**
* The average response time of the run
*
* @type {number}
*/
responseAverage: 0,
/**
* The minimum response time of the run
*
* @type {number}
*/
responseMin: 0,
/**
* The maximum response time of the run
*
* @type {number}
*/
responseMax: 0,
/**
* Standard deviation of response time of the run
*
* @type {number}
*/
responseSd: 0,
/**
* The average DNS lookup time of the run
*
* @type {number}
*/
dnsAverage: 0,
/**
* The minimum DNS lookup time of the run
*
* @type {number}
*/
dnsMin: 0,
/**
* The maximum DNS lookup time of the run
*
* @type {number}
*/
dnsMax: 0,
/**
* Standard deviation of DNS lookup time of the run
*
* @type {number}
*/
dnsSd: 0,
/**
* The average first byte time of the run
*
* @type {number}
*/
firstByteAverage: 0,
/**
* The minimum first byte time of the run
*
* @type {number}
*/
firstByteMin: 0,
/**
* The maximum first byte time of the run
*
* @type {number}
*/
firstByteMax: 0,
/**
* Standard deviation of first byte time of the run
*
* @type {number}
*/
firstByteSd: 0
},
/**
* Stores detailed information about the order of execution, request, response and assertions
*
* @type {Array<Object>}
*/
executions: [],
/**
* Stores information on data transfer made during the collection
*
* @type {Object}
*/
transfers: {
/**
* The total data received as response to every request
*
* @type {number}
*/
responseTotal: 0
},
/**
* An array of all errors encountered during the run
*
* @type {Array.<Error>}
*/
failures: [],
/**
* This stores any fatal error during the run that caused the run to abort prematurely.
*
* @type {Error}
*/
error: null
}
});
// track run timings (start and end)
RunSummary.attachTimingTrackers(this, emitter);
// accumulate statistics on all events
// for all types of events, track the counters for the event and its corresponding "before" counterpart
RunSummary.attachStatisticTrackers(this, emitter);
// accumulate statistics on requests - such as size and time
RunSummary.attachRequestTracker(this, emitter);
// accumulate errors (failures) from all events
RunSummary.attachFailureTrackers(this, emitter);
// accumulate all execution specific data in collection
RunSummary.attachReportingTrackers(this, emitter);
};
_.assign(RunSummary, {
attachReportingTrackers (summary, emitter) {
var cache = {},
executions = summary.run.executions;
emitter.on('beforeItem', function (err, o) {
if (err || !_.get(o, 'cursor.ref')) { return; }
cache[o.cursor.ref] = _.assignIn(cache[o.cursor.ref] || {}, {
cursor: o.cursor,
item: o.item
});
});
// save all responses in executions array
emitter.on('request', function (err, o) {
if (!_.get(o, 'cursor.ref')) { return; }
var execution = cache[o.cursor.ref] = (cache[o.cursor.ref] || {});
executions.push(_.assignIn(execution, {
cursor: o.cursor,
request: o.request,
response: o.response,
id: _.get(o, 'item.id')
}, err && {
requestError: err || undefined
}));
});
// save all script execution errors in each execution
emitter.on('script', function (err, o) {
if (!_.get(o, 'cursor.ref')) { return; }
var execution = cache[o.cursor.ref] = (cache[o.cursor.ref] || {}),
eventName = o && o.event && (o.event.listen + 'Script');
// store the script error corresponding to the script event name
err && (execution && eventName) && (execution[eventName] || (execution[eventName] = [])).push({
error: err
});
});
// save all assertions in each execution
emitter.on('assertion', function (err, o) {
if (!_.get(o, 'cursor.ref')) { return; }
var execution = cache[o.cursor.ref] = (cache[o.cursor.ref] || {});
if (!execution) { return; }
(execution.assertions || (execution.assertions = [])).push({
assertion: o.assertion,
skipped: o.skipped,
error: err || undefined
});
});
},
attachTimingTrackers (summary, emitter) {
// mark the point when the run started
// also mark the point when run completed and also store error if needed
emitter.on('start', function () { summary.run.timings.started = Date.now(); });
emitter.on('beforeDone', function () {
summary.run.timings.completed = Date.now();
});
emitter.on('done', function (err) {
err && (summary.error = err);
});
},
attachStatisticTrackers (summary, emitter) {
// accumulate statistics on all events
// for all types of events, track the counters for the event and its corresponding "before" counterpart
_.forEach(summary.run.stats, function (tracker, name) {
// the actual event names are singular, unlike their plural tracker names, so we make the name singular
name = name.slice(0, -1); // remove last character
// populate initial values of trackers
_.assign(tracker, { total: 0, pending: 0, failed: 0 });
// Set up common listeners for a set of events, which tracks how many times they were executed and records
// the ones which had an error passed as first argument
emitter.on(_.camelCase('before-' + name), function () {
tracker.pending += 1;
});
emitter.on(name, function (err) {
// check pending so that it does not go negative for items that do not have a `before` counterpart
tracker.pending && (tracker.pending -= 1);
err && (tracker.failed += 1);
tracker.total += 1;
});
});
},
attachRequestTracker (summary, emitter) {
// accumulate statistics on requests
emitter.on('request', function (err, o) {
if (err || !(o && o.response)) { return; }
var size = _.isFunction(o.response.size) && o.response.size(),
time = o.response.responseTime,
requestCount = summary.run.stats.requests.total,
timings,
timingPhases;
// compute the response size total
size && (summary.run.transfers.responseTotal += ((size.body || 0) + (size.headers || 0)));
// if there are redirects, get timings for the last request sent
timings = _.last(_.get(o, 'history.execution.data'));
timings = timings && timings.timings;
timingPhases = timings && sdk.Response.timingPhases(timings);
(timingPhases || time) && _.forEach([
'dns',
'firstByte',
'response'
], (value) => {
var currentValue = (value === 'response') ? time : (timingPhases && timingPhases[value]),
previousAverage = summary.run.timings[`${value}Average`],
previousVariance = Math.pow(summary.run.timings[`${value}Sd`], 2),
delta1 = currentValue - previousAverage,
delta2,
currentVariance;
if (!currentValue) { return; }
// compute average time for the given phase of request
summary.run.timings[`${value}Average`] =
(previousAverage * (requestCount - 1) + currentValue) / requestCount;
// compute minimum time for the given phase of request
if (!summary.run.timings[`${value}Min`]) {
summary.run.timings[`${value}Min`] = currentValue;
}
else {
summary.run.timings[`${value}Min`] =
Math.min(summary.run.timings[`${value}Min`], currentValue);
}
// compute the maximum time for the given phase of the request
summary.run.timings[`${value}Max`] = Math.max(summary.run.timings[`${value}Max`], currentValue);
// compute the standard deviation for the given phase of the request
// refer to Welford's online algorithm:
// https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
delta2 = currentValue - summary.run.timings[`${value}Average`];
currentVariance = (previousVariance * (requestCount - 1) + (delta1 * delta2)) / requestCount;
summary.run.timings[`${value}Sd`] = Math.sqrt(currentVariance);
});
});
},
attachFailureTrackers (summary, emitter) {
var eventsToTrack = ['beforeIteration', 'iteration', 'beforeItem', 'item', 'beforeScript', 'script',
'beforePrerequest', 'prerequest', 'beforeRequest', 'request', 'beforeTest', 'test', 'beforeAssertion',
'assertion'];
// accumulate failures of all events
// NOTE that surrogate events (which throw duplicate arguments) are not recorded
_.forEach(eventsToTrack, function (event) {
// push failures sent from "before" events
emitter.on(event, function (err, o) {
if (!err) { return; }
var item = o && o.item,
source = event;
// in case of user script error, point to the line and column of the script and its type
if (event === 'script') {
o.event && (source = o.event.listen + '-script');
if (err.stacktrace && err.stacktrace[0] && err.stacktrace[0].lineNumber) {
source += (':' + (err.stacktrace[0].lineNumber - 2));
err.stacktrace[0].columnNumber && (source += (':' + err.stacktrace[0].columnNumber));
}
}
// assertion errors need to know which assertion in the test this was
else if (event === 'assertion') {
_.has(err, 'index') && (source += (':' + err.index));
source += ' in test-script';
}
// if this is a plain error, convert it to serialised error
if (err.stack && !err.stacktrace) {
err = new SerialiseError(err, true);
}
summary.run.failures.push({
error: err,
at: source,
source: item || undefined,
parent: item && item.__parent && item.__parent.__parent || undefined,
cursor: o.cursor || {}
});
});
});
}
});
module.exports = RunSummary;
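A hedged worked example of the Welford-style running update used in attachRequestTracker above, with made-up response times (in ms) to show how the average and standard deviation evolve:
var timings = { responseAverage: 0, responseSd: 0 };
[120, 80, 100].forEach(function (currentValue, index) {
    var requestCount = index + 1,
        previousAverage = timings.responseAverage,
        previousVariance = Math.pow(timings.responseSd, 2),
        delta1 = currentValue - previousAverage,
        delta2,
        currentVariance;
    // same update equations as the `response` branch of the tracker
    timings.responseAverage = (previousAverage * (requestCount - 1) + currentValue) / requestCount;
    delta2 = currentValue - timings.responseAverage;
    currentVariance = (previousVariance * (requestCount - 1) + (delta1 * delta2)) / requestCount;
    timings.responseSd = Math.sqrt(currentVariance);
});
// after the three samples: responseAverage === 100, responseSd ≈ 16.33 (population standard deviation)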

290
node_modules/newman/lib/util.js generated vendored Normal file
View File

@@ -0,0 +1,290 @@
var fs = require('fs'),
{ URL } = require('url'),
_ = require('lodash'),
chardet = require('chardet'),
filesize = require('filesize'),
prettyms = require('pretty-ms'),
liquidJSON = require('liquid-json'),
request = require('postman-request'),
util,
version = require('../package.json').version,
SEP = ' / ',
/**
* The auxiliary options used to prettify file sizes from raw byte counts.
*
* @type {Object}
*/
FILESIZE_OPTIONS = { spacer: '' },
/**
* Maps the charset returned by chardet to node buffer ones
*
* @constant
* @type {Object}
*/
CHARDET_BUFF_MAP = {
ASCII: 'ascii',
'UTF-8': 'utf8',
'UTF-16LE': 'utf16le',
'ISO-8859-1': 'latin1'
},
POSTMAN_API_HOST = 'api.getpostman.com',
POSTMAN_API_URL = 'https://' + POSTMAN_API_HOST,
/**
* Map of resource type and its equivalent API pathname.
*
* @type {Object}
*/
POSTMAN_API_PATH_MAP = {
collection: 'collections',
environment: 'environments'
},
API_KEY_HEADER = 'X-Api-Key',
USER_AGENT_VALUE = 'Newman/' + version,
// Matches valid Postman UID, case insensitive.
// Same used for validation on the Postman API side.
UID_REGEX = /^[0-9A-Z]+-[0-9A-F]{8}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{12}$/i;
util = {
/**
* The raw newman version, taken from package.json in the root directory
*
* @type {String}
*/
version: version,
/**
* The user agent that this newman identifies as.
*
* @type {String}
*/
userAgent: USER_AGENT_VALUE,
/**
* A utility helper method that prettifies and returns raw millisecond counts.
*
* @param {Number} ms - The raw millisecond count, usually from response times.
* @returns {String} - The prettified time, scaled to units of time, depending on the input value.
*/
prettyms: function (ms) {
if (ms < 1) {
return `${parseInt(ms * 1000, 10)}µs`;
}
return (ms < 1998) ? `${parseInt(ms, 10)}ms` : prettyms(ms || 0);
},
/**
* Returns the time object with all values in largest time unit possible as strings.
*
* @param {Object} obj - {event1: time1, event2: time2, ...} (time in milliseconds)
* @returns {Object} - {event1: time1, event2: time2, ...} (time in string with appropriate unit)
*/
beautifyTime: function (obj) {
return _.forEach(obj, (value, key) => {
// convert only non-zero values
value && (obj[key] = this.prettyms(value));
});
},
/**
* A utility helper method to prettify byte counts into human readable strings.
*
* @param {Number} bytes - The raw byte count, usually from computed response sizes.
* @returns {String} - The prettified size, suffixed with scaled units, depending on the actual value provided.
*/
filesize: function (bytes) {
return filesize(bytes || 0, FILESIZE_OPTIONS);
},
/**
* Resolves the fully qualified name for the provided item
*
* @param {PostmanItem|PostmanItemGroup} item The item for which to resolve the full name
* @param {?String} [separator=SEP] The separator symbol to join path name entries with
* @returns {String} The full name of the provided item, including prepended parent item names
* @private
*/
getFullName: function (item, separator) {
if (_.isEmpty(item) || !_.isFunction(item.parent) || !_.isFunction(item.forEachParent)) { return; }
var chain = [];
item.forEachParent(function (parent) { chain.unshift(parent.name || parent.id); });
item.parent() && chain.push(item.name || item.id); // Add the current item only if it is not the collection
return chain.join(_.isString(separator) ? separator : SEP);
},
/**
* Given a buffer, it tries to match relevant encoding of the buffer.
*
* @param {Buffer} buff - Buffer for which encoding needs to be determined
* @returns {String|undefined} - Detected encoding of the given buffer
*/
detectEncoding: function (buff) {
return CHARDET_BUFF_MAP[chardet.detect(buff)];
},
/**
* Loads JSON data from the given location.
*
* @param {String} type - The type of data to load.
* @param {String} location - Can be an HTTP URL, a local file path or an UID.
* @param {Object=} options - A set of options for JSON data loading.
* @param {Object} options.postmanApiKey - API Key used to load the resources via UID from the Postman API.
* @param {Function} callback - The function whose invocation marks the end of the JSON fetch routine.
* @returns {*}
*/
fetchJson: function (type, location, options, callback) {
!callback && _.isFunction(options) && (callback = options, options = {});
var postmanApiKey = _.get(options, 'postmanApiKey'),
headers = { 'User-Agent': USER_AGENT_VALUE };
// build API URL if `location` is a valid UID and api key is provided.
// Fetch from file in case a file with valid UID name is present.
if (!fs.existsSync(location) && POSTMAN_API_PATH_MAP[type] && postmanApiKey && UID_REGEX.test(location)) {
location = `${POSTMAN_API_URL}/${POSTMAN_API_PATH_MAP[type]}/${location}`;
headers[API_KEY_HEADER] = postmanApiKey;
}
return (/^https?:\/\/.*/).test(location) ?
// Load from URL
request.get({
url: location,
json: true,
headers: headers,
// Temporary fix to fetch the collection from https URL on Node v12
// @todo find the root cause in postman-request
// Refer: https://github.com/postmanlabs/newman/issues/1991
agentOptions: {
keepAlive: true
}
}, (err, response, body) => {
if (err) {
return callback(_.set(err, 'help', `unable to fetch data from url "${location}"`));
}
try {
_.isString(body) && (body = liquidJSON.parse(body.trim()));
}
catch (e) {
return callback(_.set(e, 'help', `the url "${location}" did not provide valid JSON data`));
}
var error,
urlObj,
resource = 'resource';
if (response.statusCode !== 200) {
urlObj = new URL(location);
(urlObj.hostname === POSTMAN_API_HOST) &&
(resource = _(urlObj.pathname).split('/').get(1).slice(0, -1) || resource);
error = new Error(_.get(body, 'error.message',
`Error fetching ${resource}, the provided URL returned status code: ${response.statusCode}`));
return callback(_.assign(error, {
name: _.get(body, 'error.name', _.capitalize(resource) + 'FetchError'),
help: `Error fetching the ${resource} from the provided URL. Ensure that the URL is valid.`
}));
}
return callback(null, body);
}) :
fs.readFile(location, function (err, value) {
if (err) {
return callback(_.set(err, 'help', `unable to read data from file "${location}"`));
}
try {
value = liquidJSON.parse(value.toString(util.detectEncoding(value)).trim());
}
catch (e) {
return callback(_.set(e, 'help', `the file at "${location}" does not contain valid JSON data`));
}
return callback(null, value);
});
},
/**
* Loads raw data from a location, useful for working with non JSON data such as CSV files.
*
* @param {String} location - The relative path / URL to the raw data file.
* @param {Object=} options - A set of load options for the raw data file.
* @param {Function} callback - The callback function whose invocation marks the end of the fetch routine.
* @returns {*}
*/
fetch: function (location, options, callback) {
!callback && _.isFunction(options) && (callback = options, options = {});
return (/^https?:\/\/.*/).test(location) ?
// Load from URL
request.get({ url: location }, (err, response, body) => {
if (err) {
return callback(err);
}
return callback(null, body);
}) :
fs.readFile(String(location), function (err, value) {
if (err) {
return callback(err);
}
return callback(null, value.toString(util.detectEncoding(value)));
});
},
/**
* Checks whether the given object is a v1 collection
*
* Reference: https://github.com/postmanlabs/postman-collection-transformer/blob/v2.6.2/lib/index.js#L44
*
* @param {Object} object - The Object to check for v1 collection compliance.
* @returns {Boolean} - A boolean result indicating whether or not the passed object was a v1 collection.
*/
isV1Collection: function (object) {
return Boolean(object && object.name && object.order && object.requests);
},
/**
* Helper function to test if a given string is an integer.
* Reference: [node-csv-parse]: https://github.com/adaltas/node-csv-parse/blob/v2.5.0/lib/index.js#L207
*
* @param {String} value - The string to test for.
* @returns {Boolean}
*/
isInt: function (value) {
return (/^(-|\+)?([1-9]+[0-9]*)$/).test(value);
},
/**
* Helper function to test if a given string is a float.
* Reference: [node-csv-parse]: https://github.com/adaltas/node-csv-parse/blob/v2.5.0/lib/index.js#L210
*
* @param {String} value - The string to test for.
* @returns {Boolean}
*/
isFloat: function (value) {
return (value - parseFloat(value) + 1) >= 0;
}
};
module.exports = util;
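A brief usage sketch for a few of the helpers above (the local file path is an illustrative assumption):
var newmanUtil = require('newman/lib/util');
console.log(newmanUtil.prettyms(0.5)); // '500µs'
console.log(newmanUtil.prettyms(1234)); // '1234ms'
console.log(newmanUtil.filesize(2048)); // e.g. '2KB', formatted with the spacer removed
newmanUtil.fetchJson('collection', './collection.json', function (err, json) {
    // reads the local file; with a postmanApiKey option and a UID, it would hit the Postman API instead
    if (err) { return console.error(err.help || err.message); }
    console.log(Object.keys(json));
});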