'NotOrdered': function NotOrdered(event, commit, deps){
    if(event.eventType === 'OrderPlaced'){
        // Start packaging the order from which the event came.
        return deps.startPackaging(commit.sequenceID).then(function(){
            return when.resolve(new Event('OrderPlaced', {
                orderID: commit.sequenceID
            }));
        });
    }
    else{
        return when.reject();
    }
},
'Ordered': function Ordered(event, commit, deps){
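
A minimal, self-contained sketch of the resolve-or-reject handler pattern shown above: a state handler returns when.resolve() with a follow-up event when it accepts the incoming event, and when.reject() when it does not, so the caller can route on the outcome. The Event constructor, handler map, and deps object below are illustrative stand-ins, not the project's actual types.

var when = require('when');

// Illustrative Event constructor (stand-in only).
function Event(eventType, payload) {
    this.eventType = eventType;
    this.payload = payload;
}

var handlers = {
    'NotOrdered': function(event, commit, deps) {
        if (event.eventType === 'OrderPlaced') {
            // Accepted: perform the side effect, then resolve with a follow-up event.
            return deps.startPackaging(commit.sequenceID).then(function() {
                return when.resolve(new Event('PackagingStarted', { orderID: commit.sequenceID }));
            });
        }
        // Declined: reject so the caller knows this state does not handle the event.
        return when.reject(new Error('Unhandled event type: ' + event.eventType));
    }
};

// Usage: the rejection path distinguishes "not handled" from "handled".
handlers['NotOrdered']({ eventType: 'OrderPlaced' }, { sequenceID: 'order-1' }, {
    startPackaging: function() { return when.resolve(); }
}).then(function(nextEvent) {
    console.log('next event:', nextEvent.eventType);
}, function(err) {
    console.log('not handled:', err.message);
});
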
// node modules
var fs = require('fs');
var os = require('os');
var path = require('path');
// 3rd party modules
var execa = require('execa');
var when = require('when');
var whenNode = require('when/node');
// implementation
var mkdir = whenNode.lift(fs.mkdir);
var readFile = whenNode.lift(fs.readFile);
var rimraf = whenNode.lift(require('rimraf'));
// public
module.exports = {
    setNpmVersion: function (npmVersion) {
        return execa('npm', ['install', '-g', 'npm@' + npmVersion]);
    },
    getNpmVersion: function () {
        return execa.stdout('npm', ['-v']);
    },
    createTestApp: function (dirPath, packagePath) {
        return mkdir(dirPath).then(function () {
            return when.promise(function (res, rej) {
                var read = fs.createReadStream(packagePath);
                var write = fs.createWriteStream(path.join(dirPath, 'package.json'));
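
The helper module above is cut off inside createTestApp. A minimal sketch of the two when.js techniques it builds on: whenNode.lift turns a Node-style callback API such as fs.mkdir into a promise-returning function, and when.promise wraps a one-off callback or event source (here, a stream copy). Paths and names are illustrative.

var fs = require('fs');
var path = require('path');
var when = require('when');
var whenNode = require('when/node');

// Lift fs.mkdir(path, callback) into mkdir(path) -> promise.
var mkdir = whenNode.lift(fs.mkdir);

function copyPackageJson(dirPath, packagePath) {
    return mkdir(dirPath).then(function () {
        // Wrap the stream copy in a promise that settles on 'finish' or 'error'.
        return when.promise(function (resolve, reject) {
            var read = fs.createReadStream(packagePath);
            var write = fs.createWriteStream(path.join(dirPath, 'package.json'));
            read.on('error', reject);
            write.on('error', reject);
            write.on('finish', resolve);
            read.pipe(write);
        });
    });
}

// Usage (paths are placeholders):
copyPackageJson('/tmp/test-app', './fixtures/package.json')
    .then(function () { console.log('test app created'); })
    .catch(function (err) { console.error(err); });
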
return when.try(self._eventSink.sink.bind(self._eventSink), commitObject).then(function _commitSinkSucceeded(result) {
    // Now that the commit is sunk, we can clear the event staging area - new events will end up in subsequent commits.
    self._stagedEvents = [];
    self._updateSequenceNumber(commitObject.sequenceSlot);
    //NOTE: The check/log emission below is a good candidate for refactoring into Aspect-Oriented Programming.
    // ESDF Core does not support AOP as of now, though.
    if(self._IOObserver){
        self._IOObserver.emit('CommitSinkSuccess', {
            commitObject: commitObject
        });
    }
    // Now that the commit has been saved, we proceed to save a snapshot if the snapshotting strategy tells us to (and we have a snapshot save provider).
    // Note that _snapshotStrategy is called with "this" set to the current aggregate, which makes it behave like a private method.
    if (self.supportsSnapshotGeneration() && self._snapshotter && self._snapshotStrategy && self._snapshotStrategy(commitObject)) {
        when.try(self._saveSnapshot.bind(self)).catch(function(error) {
            //TODO: We should not be using console directly, but there is currently
            // no way to inject a custom logger.
            console.error('Error saving snapshot for %s [%s]: %s', self._aggregateID, self._aggregateType, error);
        });
        // Since saving a snapshot is never mandatory for correct operation of an event-sourced application, we do not have to react to errors.
    }
    return result;
}, function _commitSinkFailed(reason) {
    // Sink failed - do nothing. An upper layer can either retry the sinking, or reload the aggregate and retry (in the latter case, the sequence number will probably get refreshed).
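
when.try (an alias of when.attempt) calls a function with the supplied arguments and always returns a promise, so a synchronous throw is delivered as a rejection rather than escaping the call site. A minimal sketch, independent of the esdf internals above:

var when = require('when');

function parseConfig(text) {
    // JSON.parse throws synchronously on bad input.
    return JSON.parse(text);
}

// when.try routes both a synchronous throw and a rejected promise returned
// by the function into the same rejection path.
when.try(parseConfig, '{"port": 8080}')
    .then(function (config) {
        console.log('port:', config.port);
    })
    .catch(function (error) {
        console.error('could not parse config:', error.message);
    });

// Bound methods work the same way, which is why the snippet above passes
// self._eventSink.sink.bind(self._eventSink): bind preserves "this" while
// when.try still captures any synchronous throw as a rejection.
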
bootstrap: function(argv) {
    var projectRootDir = getProjectRootDir();
    if (projectRootDir !== null) {
        console.log(chalk.red('Initialization Error') + ' - Can\'t create new firebase app ' +
                    'inside an existing firebase project');
        process.exit(1);
    }
    _when.join(this.getTemplates(), auth.listFirebases(argv)).done(function(resultSet) {
        var supportedTemplates = resultSet[0],
            res = resultSet[1];
        if (res.firebases.length === 0) {
            console.log(chalk.yellow('You have no apps in your Firebase account'));
            console.log('Sign in to %s and create an app', chalk.cyan('https://firebase.com'));
            console.log('then initialize a directory for hosting');
            process.exit(1);
        }
        // Firebase names are always a subset of ^[0-9a-z-]*$, so it is safe to build a regex from them.
        var templateList = Object.keys(supportedTemplates).sort();
        var firebasePattern = new RegExp('^(' + res.firebases.join('|') + ')$');
        var templatePattern = new RegExp('^(' + templateList.join('|') + ')$');
        if (!argv.firebase || (typeof(argv.firebase) !== 'string') || !argv.firebase.match(firebasePattern)) {
            res.showFirebases();
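
A minimal sketch of the when.join pattern used above: start several independent asynchronous tasks, then receive their results as a single array. .done() is when.js's terminal call; unlike .then(), it returns nothing and rethrows otherwise-unhandled errors to the host environment. The two task functions below are placeholders for this.getTemplates() and auth.listFirebases(argv).

var when = require('when');

// Placeholder tasks (stand-ins only).
function getTemplates() {
    return when.resolve({ angular: {}, bootstrap: {} });
}
function listApps() {
    return when.resolve(['app-one', 'app-two']);
}

when.join(getTemplates(), listApps()).done(function (resultSet) {
    var templates = resultSet[0];
    var apps = resultSet[1];
    console.log('%d templates, %d apps', Object.keys(templates).length, apps.length);
}, function (error) {
    console.error('initialization failed:', error);
});
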
add : function(props) {
    var self = this;
    var def = when.defer();
    var checkExistance = when.defer();
    var tagDef = when.defer();
    try {
        // if we have an original url, make sure it's a url
        if (props.original_url && props.original_url.length) {
            assert(Validator.isURL(props.original_url), "original_url isn't a URL");
            // check if this already exists, based on URL. if it does, exit.
            self.forge({ original_url : props.original_url }).fetch().then(function(submission) {
                console.log(submission);
                if (typeof submission !== "undefined" && submission !== null) {
                    checkExistance.reject("This example already exists. Id " + submission.id);
                } else {
                    checkExistance.resolve();
                }
            });
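
The add() method above uses when.defer(), the older deferred style: create a deferred, hand out deferred.promise, and settle it later from a callback. A minimal sketch of that style next to the equivalent when.promise wrapper, which keeps resolve/reject scoped to one place; the store object and its findByUrl callback API are hypothetical stand-ins.

var when = require('when');

// Deferred style: the promise is created here, but settled later from a callback.
function checkDoesNotExist(store, url) {
    var deferred = when.defer();
    store.findByUrl(url, function (err, existing) {
        if (err) {
            deferred.reject(err);
        } else if (existing) {
            deferred.reject(new Error('This example already exists. Id ' + existing.id));
        } else {
            deferred.resolve();
        }
    });
    return deferred.promise;
}

// Equivalent with when.promise, which keeps resolve/reject scoped to the wrapper:
function checkDoesNotExistViaPromise(store, url) {
    return when.promise(function (resolve, reject) {
        store.findByUrl(url, function (err, existing) {
            if (err) { return reject(err); }
            if (existing) { return reject(new Error('Already exists. Id ' + existing.id)); }
            resolve();
        });
    });
}
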
        } catch (err) {
            service.logger.error('Post Start Init Service Error:', err);
            return when.reject(err);
        }
    }
    //
    result = service.resources.postStartInit();
    service._promiseQueue.push(result);
    // wait for Q'd resources to resolve before letting service resolve
    if (service._promiseQueue.length) {
        service.logger.info('Wait Post Start Init...');
        // TODO: need timeout in case resource promise never resolves
        return when.all(service._promiseQueue).then(function () {
            delete service._promiseQueue;
            service.logger.info('Loaded');
            service.logger.groupEnd(' ');
        });
    } else {
        service.logger.groupEnd(' ');
        return 1;
    }
}.bind(this)).then(function (result) {
    logger.groupEnd('Done Running All Post Start Inits');
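
A minimal sketch of the queue-and-wait startup pattern above: each resource pushes its initialization promise onto an array, and the service gates its "Loaded" state on when.all. All names and tasks below are illustrative.

var when = require('when');

var startupTasks = [];

// Each resource pushes its own initialization promise onto the queue.
startupTasks.push(when.resolve('database ready'));
startupTasks.push(when('cache warmed')); // when(x) lifts a plain value into a promise
startupTasks.push(when.promise(function (resolve) {
    setTimeout(function () { resolve('webhooks registered'); }, 50);
}));

// The service only reports itself as loaded once every queued task settles.
when.all(startupTasks).then(function (results) {
    results.forEach(function (message) { console.log(message); });
    console.log('Loaded');
}, function (error) {
    // A single rejection fails the whole startup.
    console.error('Post start init failed:', error);
});
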
.then(webhooks => {
    // if webhooks were not cleared on start, only remove the webhook this app created (matched by name)
    if(!flint.options.removeWebhooksOnStart) {
        var webhooksToRemove = _.filter(webhooks, webhook => {
            return (webhook.name == u.base64encode(flint.options.webhookUrl.split('/')[2] + ' ' + flint.email));
        });
        if(webhooksToRemove instanceof Array && webhooksToRemove.length > 0) {
            return when.map(webhooksToRemove, webhook => flint.spark.webhookRemove(webhook.id))
                .then(() => when(true))
                .catch(() => when(true));
        } else {
            return when(true);
        }
    }
    // otherwise, remove all webhooks on stop
    else {
        return when.map(webhooks, webhook => flint.spark.webhookRemove(webhook.id))
            .then(() => when(true))
            .catch(() => when(true));
    }
});
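
A minimal sketch of the when.map cleanup pattern above: apply a promise-returning function to every element, then collapse the outcome to a constant with .then(() => when(true)).catch(() => when(true)) so best-effort teardown never rejects for the caller. removeWebhook below is a placeholder for a call like flint.spark.webhookRemove(webhook.id).

var when = require('when');

// Placeholder for an API call such as flint.spark.webhookRemove(webhook.id).
function removeWebhook(webhook) {
    return webhook.id % 2 === 0
        ? when.resolve('removed ' + webhook.id)
        : when.reject(new Error('cannot remove ' + webhook.id));
}

var webhooks = [{ id: 1 }, { id: 2 }, { id: 3 }];

// when.map runs the mapper for every element and resolves to an array of results,
// rejecting as soon as any single removal rejects. Collapsing both outcomes to
// when(true) makes the teardown best-effort: the caller only ever sees true.
when.map(webhooks, webhook => removeWebhook(webhook))
    .then(() => when(true))
    .catch(() => when(true))
    .then(result => console.log('cleanup finished:', result));
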
} else {
    return when.promise(function(resolve,reject) {
        var promises = [];
        whenNode.call(fs.readdir,dir).done(function(files) {
            files = files.sort();
            files.forEach(function(fn) {
                var stats = fs.statSync(path.join(dir,fn));
                if (stats.isFile()) {
                    if (/\.js$/.test(fn)) {
                        promises.push(when.promise(function(resolve,reject) {
                            loadNode(dir,fn).then(resolve, function(err) {
                                resolve({'fn':fn,err:err});
                            });
                        }));
                    }
                } else if (stats.isDirectory()) {
                    // Ignore dot-directories, lib, icons, node_modules and test
                    if (!/^(\..*|lib|icons|node_modules|test)$/.test(fn)) {
                        promises.push(when.promise(function(resolve,reject) {
                            loadNodes(path.join(dir,fn)).then(function(errs) {
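
Two when.js details in the loader above are worth isolating: whenNode.call invokes a Node-style callback function once and returns a promise for its result, and each per-file promise resolves with an { fn, err } marker instead of rejecting, so one broken file cannot abort the whole scan (when.settle offers a similar all-outcomes result). A minimal sketch with a placeholder loadNode:

var fs = require('fs');
var when = require('when');
var whenNode = require('when/node');

// Placeholder for a per-file loader that may fail.
function loadNode(dir, fn) {
    return /broken/.test(fn) ? when.reject(new Error('parse error')) : when.resolve(fn);
}

function scanDir(dir) {
    // whenNode.call(fs.readdir, dir) calls fs.readdir(dir, callback) and promises the result.
    return whenNode.call(fs.readdir, dir).then(function (files) {
        var loads = files
            .filter(function (fn) { return /\.js$/.test(fn); })
            .map(function (fn) {
                // Convert per-file failures into data so the aggregate never rejects.
                return loadNode(dir, fn).catch(function (err) {
                    return { fn: fn, err: err };
                });
            });
        return when.all(loads);
    });
}

// when.settle(loads) is the built-in alternative: it always fulfils, with descriptors
// such as { state: 'fulfilled', value: ... } or { state: 'rejected', reason: ... }.
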
    readable._read = function _read(length) {
        var self = this;
        when.try(reader, sequenceID, currentOffset).then(function(commits) {
            if (!Array.isArray(commits) && commits !== null) {
                throw new Error('Expected an array of Commit objects, but got an unknown type from the commit reader function');
            }
            // Move forward in the stream:
            if (Array.isArray(commits)) {
                currentOffset += commits.length;
            }
            // Only perform one push(). Multiple pushes from one _read() are apparently a sure-fire way to cause a huge memory leak.
            self.push(commits);
        }).catch(function(error) {
            self.emit('error', error);
        });
    };
}
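
A minimal sketch of the streaming pattern above: an object-mode Readable whose _read obtains the next batch through a promise and issues exactly one push per call, pushing null when the source is exhausted. The paged readBatch function below is a stub, not the commit reader from the snippet.

var stream = require('stream');
var when = require('when');

// Stub batch reader: promises the next page of items, or null when exhausted.
var pages = [[1, 2, 3], [4, 5]];
function readBatch(page) {
    return when.resolve(page < pages.length ? pages[page] : null);
}

var currentPage = 0;
var readable = new stream.Readable({ objectMode: true });
readable._read = function _read() {
    var self = this;
    when.try(readBatch, currentPage).then(function (batch) {
        if (batch !== null) {
            currentPage += 1;
        }
        // Exactly one push per _read() call; pushing null signals end-of-stream.
        self.push(batch);
    }).catch(function (error) {
        self.emit('error', error);
    });
};

readable.on('data', function (batch) { console.log('batch:', batch); });
readable.on('end', function () { console.log('done'); });
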
}).then(function runUserFunction(loadingResult) {
    var aggregateInstance = loadingResult.instance;
    var stagedCommit;
    return when.try(userFunction, aggregateInstance).then(function saveAggregateState(userFunctionResult) {
        // Get the events staged by the aggregate root in the course of execution and append them to the result if requested.
        try {
            stagedCommit = aggregateInstance.getCommit(options.commitMetadata || {});
        } catch (commitConstructionError) {
            // no-op: we've failed to construct the commit; maybe the aggregate
            // root instance has no ID assigned? Anyway, commit() will either
            // fail or succeed without writing anything (0 events). This is not
            // a common case, but a useful one when loading "dummy entities"
            // which are guaranteed to be in their initial state. This also
            // ensures compatibility with esdf 0.1.x.
        }
        // Actually commit:
        return when.try(aggregateInstance.commit.bind(aggregateInstance), options.commitMetadata || {}).then(function _buildOutput() {
            // If the caller has requested an "advanced format" result, pass the data through to them, enriched with the result of the user function.
            if (options.advanced) {
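
Finally, a minimal sketch of the when.try(method.bind(instance), args) idiom used above: bind keeps this pointing at the instance, while when.try turns a synchronous throw from the method into a rejection. The Counter type is illustrative.

var when = require('when');

function Counter() {
    this.value = 0;
}
Counter.prototype.increment = function (by) {
    if (typeof by !== 'number') {
        throw new TypeError('increment() expects a number');
    }
    this.value += by;
    return this.value;
};

var counter = new Counter();

// bind keeps "this" pointing at counter when when.try invokes the method.
when.try(counter.increment.bind(counter), 2)
    .then(function (value) {
        console.log('counter is now', value); // counter is now 2
    });

// A synchronous throw inside the method becomes a rejection instead of escaping here.
when.try(counter.increment.bind(counter), 'two')
    .catch(function (error) {
        console.error('rejected:', error.message); // rejected: increment() expects a number
    });
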