api: harden process limit
This commit is contained in:
parent
94d179762b
commit
9b1a9bf8b3
|
@ -1,13 +1,5 @@
|
||||||
FROM node:15.8.0-alpine3.13
|
FROM node:15.8.0-alpine3.13
|
||||||
RUN apk add --no-cache gnupg tar bash coreutils shadow
|
RUN apk add --no-cache gnupg tar bash coreutils util-linux
|
||||||
RUN for i in $(seq 1000 1500); do \
|
|
||||||
groupadd -g $i runner$i && \
|
|
||||||
useradd -M runner$i -g $i -u $i && \
|
|
||||||
echo "runner$i soft nproc 64" >> /etc/security/limits.conf && \
|
|
||||||
echo "runner$i hard nproc 64" >> /etc/security/limits.conf && \
|
|
||||||
echo "runner$i soft nofile 2048" >> /etc/security/limits.conf && \
|
|
||||||
echo "runner$i hard nofile 2048" >> /etc/security/limits.conf ;\
|
|
||||||
done
|
|
||||||
|
|
||||||
ENV NODE_ENV=production
|
ENV NODE_ENV=production
|
||||||
WORKDIR /piston_api
|
WORKDIR /piston_api
|
||||||
|
|
|
@ -70,7 +70,14 @@ class Job {
|
||||||
|
|
||||||
async safe_call(file, args, timeout){
|
async safe_call(file, args, timeout){
|
||||||
return await new Promise((resolve, reject) => {
|
return await new Promise((resolve, reject) => {
|
||||||
const proc_call = ['unshare','-n','-r','bash',file, ...args].slice(!config.enable_unshare*3)
|
const unshare = config.enable_unshare ? ['unshare','-n','-r'] : [];
|
||||||
|
const prlimit = ['prlimit','--nproc=64'];
|
||||||
|
|
||||||
|
const proc_call = [
|
||||||
|
...prlimit,
|
||||||
|
...unshare,
|
||||||
|
'bash',file, ...args
|
||||||
|
];
|
||||||
var stdout = '';
|
var stdout = '';
|
||||||
var stderr = '';
|
var stderr = '';
|
||||||
const proc = cp.spawn(proc_call[0], proc_call.splice(1) ,{
|
const proc = cp.spawn(proc_call[0], proc_call.splice(1) ,{
|
||||||
|
@ -88,23 +95,25 @@ class Job {
|
||||||
|
|
||||||
proc.stderr.on('data', d=>{if(stderr.length>config.output_max_size) proc.kill('SIGKILL'); else stderr += d;});
|
proc.stderr.on('data', d=>{if(stderr.length>config.output_max_size) proc.kill('SIGKILL'); else stderr += d;});
|
||||||
proc.stdout.on('data', d=>{if(stdout.length>config.output_max_size) proc.kill('SIGKILL'); else stdout += d;});
|
proc.stdout.on('data', d=>{if(stdout.length>config.output_max_size) proc.kill('SIGKILL'); else stdout += d;});
|
||||||
function exitCleanup(){
|
function exit_cleanup(){
|
||||||
clearTimeout(kill_timeout);
|
clearTimeout(kill_timeout);
|
||||||
proc.stderr.destroy()
|
proc.stderr.destroy();
|
||||||
proc.stdout.destroy()
|
proc.stdout.destroy();
|
||||||
try{
|
try{
|
||||||
process.kill(-proc.pid, 'SIGKILL')
|
process.kill(-proc.pid, 'SIGKILL');
|
||||||
}catch{} //Probably already dead!
|
}catch{
|
||||||
|
// Process will be dead already, so nothing to kill.
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
proc.on('exit', (code, signal)=>{
|
proc.on('exit', (code, signal)=>{
|
||||||
exitCleanup()
|
exit_cleanup();
|
||||||
|
|
||||||
resolve({stdout, stderr, code, signal});
|
resolve({stdout, stderr, code, signal});
|
||||||
});
|
});
|
||||||
|
|
||||||
proc.on('error', (err) => {
|
proc.on('error', (err) => {
|
||||||
exitCleanup()
|
exit_cleanup();
|
||||||
|
|
||||||
reject({error: err, stdout, stderr});
|
reject({error: err, stdout, stderr});
|
||||||
});
|
});
|
||||||
|
@ -118,14 +127,14 @@ class Job {
|
||||||
const compile = this.runtime.compiled && await this.safe_call(
|
const compile = this.runtime.compiled && await this.safe_call(
|
||||||
path.join(this.runtime.pkgdir, 'compile'),
|
path.join(this.runtime.pkgdir, 'compile'),
|
||||||
[this.main, ...this.files],
|
[this.main, ...this.files],
|
||||||
this.timeouts.compile)
|
this.timeouts.compile);
|
||||||
|
|
||||||
logger.debug('Running');
|
logger.debug('Running');
|
||||||
|
|
||||||
const run = await this.safe_call(
|
const run = await this.safe_call(
|
||||||
path.join(this.runtime.pkgdir, 'run'),
|
path.join(this.runtime.pkgdir, 'run'),
|
||||||
[this.main, ...this.args],
|
[this.main, ...this.args],
|
||||||
this.timeouts.run)
|
this.timeouts.run);
|
||||||
|
|
||||||
this.state = job_states.EXECUTED;
|
this.state = job_states.EXECUTED;
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue