To explain my situation: I have a pm2 cron script that I run using:
pm2 start clear-redis-state-cron.js -n clearState --cron '0 0/1 * 1/1 * *'
This runs the JS script called clear-redis-state-cron.js just fine.
The role of this script is to stop process p1 and process p2. It then runs a Lua redis script that clears some keys from the database. This all works, and since the script is brief I've included it here:
var crs = require('./clear-redis-state'),
    pm2 = require('pm2');

pm2.connect(function(err) {
  pm2.stop('component1');
  pm2.stop('component2');
  crs.clear();
  pm2.restart(__dirname + '/../../node_modules/component1/index.js', { name: 'c1' }, function (err, proc) {
    if (err) throw new Error('err');
  });
  pm2.restart(__dirname + '/../../node_modules/component2/index.js', { name: 'c2' }, function (err, proc) {
    if (err) throw new Error('err');
  });
});
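Note that the pm2 calls above are asynchronous: as written, the stops, the redis clear and the restarts can all overlap. Purely as an illustration, a minimal sketch of the same steps sequenced through callbacks (assuming crs.clear() is synchronous, and restarting the already-registered processes by name rather than by script path) could look like this:
var crs = require('./clear-redis-state'),
    pm2 = require('pm2');

pm2.connect(function (err) {
  if (err) throw err;

  // Stop the first component, then the second, then clear redis and restart.
  pm2.stop('component1', function (err) {
    if (err) console.log('stop component1 failed:', err);

    pm2.stop('component2', function (err) {
      if (err) console.log('stop component2 failed:', err);

      crs.clear();

      pm2.restart('component1', function (err) {
        if (err) console.log('restart component1 failed:', err);

        pm2.restart('component2', function (err) {
          if (err) console.log('restart component2 failed:', err);

          // Release the connection so the cron script can exit cleanly.
          pm2.disconnect();
        });
      });
    });
  });
});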
The cron script calls a clear() function, which is defined here:
var config = require('common/lib/config'),
    log = require('common/lib/logger'),
    redis = require('common/lib/redis'),
    ScriptTo = require('redis-scripto');

exports.clear = function() {
  log.init();

  if (!config.isSet()) {
    // Use local config
    var configPath = require('path').join(__dirname, '../../app/config');
    config.load(configPath);
  }

  redis.init(config.get('redis'));

  var scriptManager = new ScriptTo(redis.getClient());
  scriptManager.loadFromDir(__dirname + '/scripts');

  scriptManager.run('clear-state', [], [], function(err, results) {
    logError(err);
    console.log('results:', results);
  });

  function logError(err) {
    if (err !== null) {
      console.log('Error loading lua script merge-keys: ', err);
    }
  }
};
I have no problems with that. However, the cron script seems to crash on start. Let's say I already have pm2 running two processes, component1 and component2, called p1 and p2 respectively. Why would I get the following error when I start the cron script with --no-daemon?
... clear-redis-state-cron.js had too many unstable restarts (15). Stopped. "errored"
My hunch is that the process is either starting up or shutting down incorrectly and ends up in the wrong state as a result, so when the script tries to close a process it's already closed; pm2 then assumes something went wrong and stops the cron process.
Any ideas what I might be doing wrong?
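(For reference, one way to test the "already closed" theory would be to ask pm2 for the process state before stopping it. A rough sketch using pm2.describe, whose status field pm2 reports as e.g. 'online' or 'stopped':)
var pm2 = require('pm2');

pm2.connect(function (err) {
  if (err) throw err;

  pm2.describe('component1', function (err, descriptions) {
    if (err || !descriptions.length) {
      console.log('component1 is not known to pm2:', err);
      return pm2.disconnect();
    }

    // Only issue a stop if the process is actually online.
    var status = descriptions[0].pm2_env.status;
    console.log('component1 status before stop:', status);

    if (status === 'online') {
      pm2.stop('component1', function (err) {
        if (err) console.log('stop failed:', err);
        pm2.disconnect();
      });
    } else {
      pm2.disconnect();
    }
  });
});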
Edit: I tried promisifying my pm2 shutdown logic like so:
pm2.connect(function(err) {
  Promise.resolve(ops.stop('component1'))
    .then(ops.stop('component2'))
    .then(ops.clear)
    .then(ops.restart(__dirname + '/../../node_modules/component1/index.js', { name: 'component1' }))
    .then(ops.restart(__dirname + '/../../node_modules/component2/index.js', { name: 'component2' }))
    .catch(logFailureToStop);
});

var logFailureToStop = function (err) {
  console.log('Failed to stop ', err);
};
With the following result after stopping the processes that are running:
$ pm2 list
┌────────────┬────┬──────┬───────┬─────────┬───────────┬────────┬───────────┬──────────┐
│ App name   │ id │ mode │ PID   │ status  │ restarted │ uptime │ memory    │ watching │
├────────────┼────┼──────┼───────┼─────────┼───────────┼────────┼───────────┼──────────┤
│ component2 │ 0  │ fork │ 0     │ stopped │ 17        │ 0      │ 0 B       │ disabled │
│ component1 │ 1  │ fork │ 18769 │ online  │ 31        │ 19s    │ 30.539 MB │ disabled │
└────────────┴────┴──────┴───────┴─────────┴───────────┴────────┴───────────┴──────────┘
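As written, that chain doesn't actually wait: ops.stop('component1') runs immediately, and each .then() is handed a value rather than a function, so the steps still fire in parallel. A sketch of wrappers that would make each step wait (the ops module here is hypothetical, and it assumes crs.clear() is synchronous and that the processes can be restarted by name):
var pm2 = require('pm2'),
    crs = require('./clear-redis-state');

// Hypothetical ops wrappers that resolve once the underlying pm2 call completes.
var ops = {
  stop: function (name) {
    return new Promise(function (resolve, reject) {
      pm2.stop(name, function (err) { return err ? reject(err) : resolve(); });
    });
  },
  restart: function (name) {
    return new Promise(function (resolve, reject) {
      pm2.restart(name, function (err) { return err ? reject(err) : resolve(); });
    });
  },
  clear: function () {
    return Promise.resolve(crs.clear());
  }
};

pm2.connect(function (err) {
  if (err) throw err;

  ops.stop('component1')
    .then(function () { return ops.stop('component2'); })
    .then(ops.clear)
    .then(function () { return ops.restart('component1'); })
    .then(function () { return ops.restart('component2'); })
    .catch(function (err) { console.log('Failed to stop ', err); })
    .then(function () { pm2.disconnect(); });
});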
1 Answer
Managed to resolve this issue in the end.
The issue was caused by an event handler listening for SIGTERM. Apparently these interfere with PM2, so you need to use the gracefulStop/gracefulRestart commands instead.
See: https://github.com/Unitech/PM2/issues/304
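For illustration, this is roughly the kind of handler that caused the conflict, next to the shutdown-message pattern pm2's graceful commands expect (a sketch; the actual cleanup depends on the app):
// Before: a custom SIGTERM handler inside the managed process. pm2 signals the
// process itself when stopping it, so intercepting the signal here interfered
// with pm2's stop/restart handling.
process.on('SIGTERM', function () {
  // custom cleanup that prevented pm2 from completing the stop
});

// After: listen for pm2's 'shutdown' message instead (sent by the graceful
// commands), run the cleanup, then exit explicitly.
process.on('message', function (msg) {
  if (msg === 'shutdown') {
    // e.g. close redis connections, flush logs, etc.
    process.exit(0);
  }
});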