Node.js clustering is slower than single threaded mode
I was just experimenting with clustering in Node.js
. I wrote a little fibonacci
example, where I called the function using a clustered server and a non-clustered server as follows.
// fib.js
// Factory returning a memoized Fibonacci function.
// `store` caches every computed value, so fib(n) costs O(n) additions the
// first time and O(1) on any repeated lookup.
function makeFib() {
  const store = [] // FIX: the array literal was lost in the page scrape; without it `store` is undefined and store[0] throws
  store[0] = 0
  store[1] = 1
  return function fib(n) {
    if (store[n] === undefined) {
      store[n] = fib(n - 2) + fib(n - 1)
    }
    return store[n]
  }
}
// Guarded so the file also loads in non-CommonJS contexts (e.g. ESM tooling);
// under Node's CommonJS loader `module` exists and the export is unchanged.
if (typeof module !== 'undefined') module.exports = makeFib
// non-clustered server
// Single-process HTTP server: every request computes fib(30) on the one
// event loop and replies with the pid plus the result.
const http = require('http')
const fib = require('./fib.js')
const f = fib()
http.createServer((req, res) => {
  res.writeHead(200)
  // FIX: restore the "\n" escape that the page scrape flattened to a literal "n"
  res.end(`From ${process.pid}: ${f(30)}\n`)
}).listen(8000)
// clustered server
// Master forks one worker per CPU; each worker runs its own HTTP server on
// port 8000 (the cluster module shares the listening socket between them).
const cluster = require('cluster')
const http = require('http')
const numCPUs = require('os').cpus().length
const fib = require('./fib.js')
const f = fib()
if (cluster.isMaster) {
  // Master: only spawns workers, never handles requests itself.
  for (let i = 0; i < numCPUs; i++) {
    cluster.fork()
  }
} else {
  // Worker: identical request handler to the non-clustered server.
  http.createServer((req, res) => {
    res.writeHead(200)
    // FIX: restore the "\n" escape that the page scrape flattened to a literal "n"
    res.end(`From ${process.pid}: ${f(30)}\n`)
  }).listen(8000)
}
Now, I tried benchmarking the performance of these two servers using artillery
but the non-clustered server seemed to be faster. How come?
// Clustered Server Report
Started phase 0, duration: 1s @ 07:53:24(+0000) 2018-11-23
Report @ 07:53:27(+0000) 2018-11-23
Scenarios launched: 50
Scenarios completed: 50
Requests completed: 2000
RPS sent: 803.21
Request latency:
min: 0.3
max: 70.5
median: 20.7
p95: 38.7
p99: 45.6
Codes:
200: 2000
All virtual users finished
Summary report @ 07:53:27(+0000) 2018-11-23
Scenarios launched: 50
Scenarios completed: 50
Requests completed: 2000
RPS sent: 796.81
Request latency:
min: 0.3
max: 70.5
median: 20.7
p95: 38.7
p99: 45.6
Scenario counts:
0: 50 (100%)
Codes:
200: 2000
// Non-clustered Server Report
Started phase 0, duration: 1s @ 07:53:39(+0000) 2018-11-23
Report @ 07:53:41(+0000) 2018-11-23
Scenarios launched: 50
Scenarios completed: 50
Requests completed: 2000
RPS sent: 806.45
Request latency:
min: 0.3
max: 70.1
median: 21.4
p95: 38.3
p99: 44.6
Codes:
200: 2000
All virtual users finished
Summary report @ 07:53:41(+0000) 2018-11-23
Scenarios launched: 50
Scenarios completed: 50
Requests completed: 2000
RPS sent: 800
Request latency:
min: 0.3
max: 70.1
median: 21.4
p95: 38.3
p99: 44.6
Scenario counts:
0: 50 (100%)
Codes:
200: 2000
node.js cluster-computing
add a comment |
I was just experimenting with clustering in Node.js
. I wrote a little fibonacci
example, where I called the function using a clustered server and a non-clustered server as follows.
// fib.js
// Factory returning a memoized Fibonacci function.
// `store` caches every computed value, so fib(n) costs O(n) additions the
// first time and O(1) on any repeated lookup.
function makeFib() {
  const store = [] // FIX: the array literal was lost in the page scrape; without it `store` is undefined and store[0] throws
  store[0] = 0
  store[1] = 1
  return function fib(n) {
    if (store[n] === undefined) {
      store[n] = fib(n - 2) + fib(n - 1)
    }
    return store[n]
  }
}
// Guarded so the file also loads in non-CommonJS contexts (e.g. ESM tooling);
// under Node's CommonJS loader `module` exists and the export is unchanged.
if (typeof module !== 'undefined') module.exports = makeFib
// non-clustered server
// Single-process HTTP server: every request computes fib(30) on the one
// event loop and replies with the pid plus the result.
const http = require('http')
const fib = require('./fib.js')
const f = fib()
http.createServer((req, res) => {
  res.writeHead(200)
  // FIX: restore the "\n" escape that the page scrape flattened to a literal "n"
  res.end(`From ${process.pid}: ${f(30)}\n`)
}).listen(8000)
// clustered server
// Master forks one worker per CPU; each worker runs its own HTTP server on
// port 8000 (the cluster module shares the listening socket between them).
const cluster = require('cluster')
const http = require('http')
const numCPUs = require('os').cpus().length
const fib = require('./fib.js')
const f = fib()
if (cluster.isMaster) {
  // Master: only spawns workers, never handles requests itself.
  for (let i = 0; i < numCPUs; i++) {
    cluster.fork()
  }
} else {
  // Worker: identical request handler to the non-clustered server.
  http.createServer((req, res) => {
    res.writeHead(200)
    // FIX: restore the "\n" escape that the page scrape flattened to a literal "n"
    res.end(`From ${process.pid}: ${f(30)}\n`)
  }).listen(8000)
}
Now, I tried benchmarking the performance of these two servers using artillery
but the non-clustered server seemed to be faster. How come?
// Clustered Server Report
Started phase 0, duration: 1s @ 07:53:24(+0000) 2018-11-23
Report @ 07:53:27(+0000) 2018-11-23
Scenarios launched: 50
Scenarios completed: 50
Requests completed: 2000
RPS sent: 803.21
Request latency:
min: 0.3
max: 70.5
median: 20.7
p95: 38.7
p99: 45.6
Codes:
200: 2000
All virtual users finished
Summary report @ 07:53:27(+0000) 2018-11-23
Scenarios launched: 50
Scenarios completed: 50
Requests completed: 2000
RPS sent: 796.81
Request latency:
min: 0.3
max: 70.5
median: 20.7
p95: 38.7
p99: 45.6
Scenario counts:
0: 50 (100%)
Codes:
200: 2000
// Non-clustered Server Report
Started phase 0, duration: 1s @ 07:53:39(+0000) 2018-11-23
Report @ 07:53:41(+0000) 2018-11-23
Scenarios launched: 50
Scenarios completed: 50
Requests completed: 2000
RPS sent: 806.45
Request latency:
min: 0.3
max: 70.1
median: 21.4
p95: 38.3
p99: 44.6
Codes:
200: 2000
All virtual users finished
Summary report @ 07:53:41(+0000) 2018-11-23
Scenarios launched: 50
Scenarios completed: 50
Requests completed: 2000
RPS sent: 800
Request latency:
min: 0.3
max: 70.1
median: 21.4
p95: 38.3
p99: 44.6
Scenario counts:
0: 50 (100%)
Codes:
200: 2000
node.js cluster-computing
Are you sure that f(30)
does enough work? Otherwise you won't get benefits from multiple processes, while a cluster itself has an overhead.
– estus
Nov 23 '18 at 8:21
add a comment |
I was just experimenting with clustering in Node.js
. I wrote a little fibonacci
example, where I called the function using a clustered server and a non-clustered server as follows.
// fib.js
// Factory returning a memoized Fibonacci function.
// `store` caches every computed value, so fib(n) costs O(n) additions the
// first time and O(1) on any repeated lookup.
function makeFib() {
  const store = [] // FIX: the array literal was lost in the page scrape; without it `store` is undefined and store[0] throws
  store[0] = 0
  store[1] = 1
  return function fib(n) {
    if (store[n] === undefined) {
      store[n] = fib(n - 2) + fib(n - 1)
    }
    return store[n]
  }
}
// Guarded so the file also loads in non-CommonJS contexts (e.g. ESM tooling);
// under Node's CommonJS loader `module` exists and the export is unchanged.
if (typeof module !== 'undefined') module.exports = makeFib
// non-clustered server
// Single-process HTTP server: every request computes fib(30) on the one
// event loop and replies with the pid plus the result.
const http = require('http')
const fib = require('./fib.js')
const f = fib()
http.createServer((req, res) => {
  res.writeHead(200)
  // FIX: restore the "\n" escape that the page scrape flattened to a literal "n"
  res.end(`From ${process.pid}: ${f(30)}\n`)
}).listen(8000)
// clustered server
// Master forks one worker per CPU; each worker runs its own HTTP server on
// port 8000 (the cluster module shares the listening socket between them).
const cluster = require('cluster')
const http = require('http')
const numCPUs = require('os').cpus().length
const fib = require('./fib.js')
const f = fib()
if (cluster.isMaster) {
  // Master: only spawns workers, never handles requests itself.
  for (let i = 0; i < numCPUs; i++) {
    cluster.fork()
  }
} else {
  // Worker: identical request handler to the non-clustered server.
  http.createServer((req, res) => {
    res.writeHead(200)
    // FIX: restore the "\n" escape that the page scrape flattened to a literal "n"
    res.end(`From ${process.pid}: ${f(30)}\n`)
  }).listen(8000)
}
Now, I tried benchmarking the performance of these two servers using artillery
but the non-clustered server seemed to be faster. How come?
// Clustered Server Report
Started phase 0, duration: 1s @ 07:53:24(+0000) 2018-11-23
Report @ 07:53:27(+0000) 2018-11-23
Scenarios launched: 50
Scenarios completed: 50
Requests completed: 2000
RPS sent: 803.21
Request latency:
min: 0.3
max: 70.5
median: 20.7
p95: 38.7
p99: 45.6
Codes:
200: 2000
All virtual users finished
Summary report @ 07:53:27(+0000) 2018-11-23
Scenarios launched: 50
Scenarios completed: 50
Requests completed: 2000
RPS sent: 796.81
Request latency:
min: 0.3
max: 70.5
median: 20.7
p95: 38.7
p99: 45.6
Scenario counts:
0: 50 (100%)
Codes:
200: 2000
// Non-clustered Server Report
Started phase 0, duration: 1s @ 07:53:39(+0000) 2018-11-23
Report @ 07:53:41(+0000) 2018-11-23
Scenarios launched: 50
Scenarios completed: 50
Requests completed: 2000
RPS sent: 806.45
Request latency:
min: 0.3
max: 70.1
median: 21.4
p95: 38.3
p99: 44.6
Codes:
200: 2000
All virtual users finished
Summary report @ 07:53:41(+0000) 2018-11-23
Scenarios launched: 50
Scenarios completed: 50
Requests completed: 2000
RPS sent: 800
Request latency:
min: 0.3
max: 70.1
median: 21.4
p95: 38.3
p99: 44.6
Scenario counts:
0: 50 (100%)
Codes:
200: 2000
node.js cluster-computing
I was just experimenting with clustering in Node.js
. I wrote a little fibonacci
example, where I called the function using a clustered server and a non-clustered server as follows.
// fib.js
// Factory returning a memoized Fibonacci function.
// `store` caches every computed value, so fib(n) costs O(n) additions the
// first time and O(1) on any repeated lookup.
function makeFib() {
  const store = [] // FIX: the array literal was lost in the page scrape; without it `store` is undefined and store[0] throws
  store[0] = 0
  store[1] = 1
  return function fib(n) {
    if (store[n] === undefined) {
      store[n] = fib(n - 2) + fib(n - 1)
    }
    return store[n]
  }
}
// Guarded so the file also loads in non-CommonJS contexts (e.g. ESM tooling);
// under Node's CommonJS loader `module` exists and the export is unchanged.
if (typeof module !== 'undefined') module.exports = makeFib
// non-clustered server
// Single-process HTTP server: every request computes fib(30) on the one
// event loop and replies with the pid plus the result.
const http = require('http')
const fib = require('./fib.js')
const f = fib()
http.createServer((req, res) => {
  res.writeHead(200)
  // FIX: restore the "\n" escape that the page scrape flattened to a literal "n"
  res.end(`From ${process.pid}: ${f(30)}\n`)
}).listen(8000)
// clustered server
// Master forks one worker per CPU; each worker runs its own HTTP server on
// port 8000 (the cluster module shares the listening socket between them).
const cluster = require('cluster')
const http = require('http')
const numCPUs = require('os').cpus().length
const fib = require('./fib.js')
const f = fib()
if (cluster.isMaster) {
  // Master: only spawns workers, never handles requests itself.
  for (let i = 0; i < numCPUs; i++) {
    cluster.fork()
  }
} else {
  // Worker: identical request handler to the non-clustered server.
  http.createServer((req, res) => {
    res.writeHead(200)
    // FIX: restore the "\n" escape that the page scrape flattened to a literal "n"
    res.end(`From ${process.pid}: ${f(30)}\n`)
  }).listen(8000)
}
Now, I tried benchmarking the performance of these two servers using artillery
but the non-clustered server seemed to be faster. How come?
// Clustered Server Report
Started phase 0, duration: 1s @ 07:53:24(+0000) 2018-11-23
Report @ 07:53:27(+0000) 2018-11-23
Scenarios launched: 50
Scenarios completed: 50
Requests completed: 2000
RPS sent: 803.21
Request latency:
min: 0.3
max: 70.5
median: 20.7
p95: 38.7
p99: 45.6
Codes:
200: 2000
All virtual users finished
Summary report @ 07:53:27(+0000) 2018-11-23
Scenarios launched: 50
Scenarios completed: 50
Requests completed: 2000
RPS sent: 796.81
Request latency:
min: 0.3
max: 70.5
median: 20.7
p95: 38.7
p99: 45.6
Scenario counts:
0: 50 (100%)
Codes:
200: 2000
// Non-clustered Server Report
Started phase 0, duration: 1s @ 07:53:39(+0000) 2018-11-23
Report @ 07:53:41(+0000) 2018-11-23
Scenarios launched: 50
Scenarios completed: 50
Requests completed: 2000
RPS sent: 806.45
Request latency:
min: 0.3
max: 70.1
median: 21.4
p95: 38.3
p99: 44.6
Codes:
200: 2000
All virtual users finished
Summary report @ 07:53:41(+0000) 2018-11-23
Scenarios launched: 50
Scenarios completed: 50
Requests completed: 2000
RPS sent: 800
Request latency:
min: 0.3
max: 70.1
median: 21.4
p95: 38.3
p99: 44.6
Scenario counts:
0: 50 (100%)
Codes:
200: 2000
node.js cluster-computing
node.js cluster-computing
asked Nov 23 '18 at 7:57
Rajat SaxenaRajat Saxena
1,68623352
1,68623352
Are you sure that f(30)
does enough work? Otherwise you won't get benefits from multiple processes, while a cluster itself has an overhead.
– estus
Nov 23 '18 at 8:21
add a comment |
Are you sure that f(30)
does enough work? Otherwise you won't get benefits from multiple processes, while a cluster itself has an overhead.
– estus
Nov 23 '18 at 8:21
Are you sure that
f(30)
does enough work? Otherwise you won't get benefits from multiple processes, while a cluster itself has an overhead.– estus
Nov 23 '18 at 8:21
Are you sure that
f(30)
does enough work? Otherwise you won't get benefits from multiple processes, while a cluster itself has an overhead.– estus
Nov 23 '18 at 8:21
add a comment |
1 Answer
1
active
oldest
votes
The node cluster documentation itself says that the worker distribution tends to be very unbalanced due to operating system scheduler vagaries, which affects performance. Also see this comparison between the cluster
nginx
and iptables
load balancer.
AFAIK performance testing tends to be much more complicated if we want to get meaningful results. Some mainly statistics-related questions about your test:
Sample sizing: Why did you choose this sample size? Are you sure that the results will be the same with different sample sizes? Eg.: AFAIK the main advantage of
clustering
kicks in when the single-threaded process's request-handling capability is exhausted. Testing environment: How did you run your test? In which environment? Was the environment isolated? On how many cores? What kind of processors?
Significance: Why do you think your result represents significant difference? How did you test the difference between the samples?
add a comment |
Your Answer
StackExchange.ifUsing("editor", function () {
StackExchange.using("externalEditor", function () {
StackExchange.using("snippets", function () {
StackExchange.snippets.init();
});
});
}, "code-snippets");
StackExchange.ready(function() {
var channelOptions = {
tags: "".split(" "),
id: "1"
};
initTagRenderer("".split(" "), "".split(" "), channelOptions);
StackExchange.using("externalEditor", function() {
// Have to fire editor after snippets, if snippets enabled
if (StackExchange.settings.snippets.snippetsEnabled) {
StackExchange.using("snippets", function() {
createEditor();
});
}
else {
createEditor();
}
});
function createEditor() {
StackExchange.prepareEditor({
heartbeatType: 'answer',
autoActivateHeartbeat: false,
convertImagesToLinks: true,
noModals: true,
showLowRepImageUploadWarning: true,
reputationToPostImages: 10,
bindNavPrevention: true,
postfix: "",
imageUploader: {
brandingHtml: "Powered by u003ca class="icon-imgur-white" href="https://imgur.com/"u003eu003c/au003e",
contentPolicyHtml: "User contributions licensed under u003ca href="https://creativecommons.org/licenses/by-sa/3.0/"u003ecc by-sa 3.0 with attribution requiredu003c/au003e u003ca href="https://stackoverflow.com/legal/content-policy"u003e(content policy)u003c/au003e",
allowUrls: true
},
onDemand: true,
discardSelector: ".discard-answer"
,immediatelyShowMarkdownHelp:true
});
}
});
Sign up or log in
StackExchange.ready(function () {
StackExchange.helpers.onClickDraftSave('#login-link');
});
Sign up using Google
Sign up using Facebook
Sign up using Email and Password
Post as a guest
Required, but never shown
StackExchange.ready(
function () {
StackExchange.openid.initPostLogin('.new-post-login', 'https%3a%2f%2fstackoverflow.com%2fquestions%2f53442671%2fnode-js-clustering-is-slower-than-single-threaded-mode%23new-answer', 'question_page');
}
);
Post as a guest
Required, but never shown
1 Answer
1
active
oldest
votes
1 Answer
1
active
oldest
votes
active
oldest
votes
active
oldest
votes
The node cluster documentation says itself, that the worker distribution tends to be very unbalanced due to operating system scheduler vagaries, which affects performance. Also see this comparsion between the cluster
nginx
and iptables
load balancer.
AFAIK performance testing tends to be much more complicated if we want to get meaningfull results. Some mainly statistic related questions about your test:
Sample sizing: Why did you choose this sample size? Are you sure that the results will be the same with different sample sizes? Eg.: AFAIK the main advantage of
clustering
kicks off when the single thread process request handling capability ends.Testing environment: How did you run your test? In which environment? The environment was isolated? On how many core? What kind of processors?
Significance: Why do you think your result represents significant difference? How did you test the difference between the samples?
add a comment |
The node cluster documentation says itself, that the worker distribution tends to be very unbalanced due to operating system scheduler vagaries, which affects performance. Also see this comparsion between the cluster
nginx
and iptables
load balancer.
AFAIK performance testing tends to be much more complicated if we want to get meaningfull results. Some mainly statistic related questions about your test:
Sample sizing: Why did you choose this sample size? Are you sure that the results will be the same with different sample sizes? Eg.: AFAIK the main advantage of
clustering
kicks off when the single thread process request handling capability ends.Testing environment: How did you run your test? In which environment? The environment was isolated? On how many core? What kind of processors?
Significance: Why do you think your result represents significant difference? How did you test the difference between the samples?
add a comment |
The node cluster documentation says itself, that the worker distribution tends to be very unbalanced due to operating system scheduler vagaries, which affects performance. Also see this comparsion between the cluster
nginx
and iptables
load balancer.
AFAIK performance testing tends to be much more complicated if we want to get meaningfull results. Some mainly statistic related questions about your test:
Sample sizing: Why did you choose this sample size? Are you sure that the results will be the same with different sample sizes? Eg.: AFAIK the main advantage of
clustering
kicks off when the single thread process request handling capability ends.Testing environment: How did you run your test? In which environment? The environment was isolated? On how many core? What kind of processors?
Significance: Why do you think your result represents significant difference? How did you test the difference between the samples?
The node cluster documentation says itself, that the worker distribution tends to be very unbalanced due to operating system scheduler vagaries, which affects performance. Also see this comparsion between the cluster
nginx
and iptables
load balancer.
AFAIK performance testing tends to be much more complicated if we want to get meaningfull results. Some mainly statistic related questions about your test:
Sample sizing: Why did you choose this sample size? Are you sure that the results will be the same with different sample sizes? Eg.: AFAIK the main advantage of
clustering
kicks off when the single thread process request handling capability ends.Testing environment: How did you run your test? In which environment? The environment was isolated? On how many core? What kind of processors?
Significance: Why do you think your result represents significant difference? How did you test the difference between the samples?
answered Nov 23 '18 at 10:52
lependulependu
724314
724314
add a comment |
add a comment |
Thanks for contributing an answer to Stack Overflow!
- Please be sure to answer the question. Provide details and share your research!
But avoid …
- Asking for help, clarification, or responding to other answers.
- Making statements based on opinion; back them up with references or personal experience.
To learn more, see our tips on writing great answers.
Sign up or log in
StackExchange.ready(function () {
StackExchange.helpers.onClickDraftSave('#login-link');
});
Sign up using Google
Sign up using Facebook
Sign up using Email and Password
Post as a guest
Required, but never shown
StackExchange.ready(
function () {
StackExchange.openid.initPostLogin('.new-post-login', 'https%3a%2f%2fstackoverflow.com%2fquestions%2f53442671%2fnode-js-clustering-is-slower-than-single-threaded-mode%23new-answer', 'question_page');
}
);
Post as a guest
Required, but never shown
Sign up or log in
StackExchange.ready(function () {
StackExchange.helpers.onClickDraftSave('#login-link');
});
Sign up using Google
Sign up using Facebook
Sign up using Email and Password
Post as a guest
Required, but never shown
Sign up or log in
StackExchange.ready(function () {
StackExchange.helpers.onClickDraftSave('#login-link');
});
Sign up using Google
Sign up using Facebook
Sign up using Email and Password
Post as a guest
Required, but never shown
Sign up or log in
StackExchange.ready(function () {
StackExchange.helpers.onClickDraftSave('#login-link');
});
Sign up using Google
Sign up using Facebook
Sign up using Email and Password
Sign up using Google
Sign up using Facebook
Sign up using Email and Password
Post as a guest
Required, but never shown
Required, but never shown
Required, but never shown
Required, but never shown
Required, but never shown
Required, but never shown
Required, but never shown
Required, but never shown
Required, but never shown
Are you sure that
f(30)
does enough work? Otherwise you won't get benefits from multiple processes, while a cluster itself has an overhead.– estus
Nov 23 '18 at 8:21