Compare commits

10 Commits: 29a8edd76a ... d6845063f8

| Author | SHA1 | Date |
| --- | --- | --- |
| Amarjeet Anand | d6845063f8 | |
| Omar Brikaa | 4e361dcf92 | |
| Omar Brikaa | 512b63d2b5 | |
| Omar Brikaa | 24c5c05308 | |
| Omar Brikaa | 47661343da | |
| Omar Brikaa | 40b8598d2d | |
| Omar Brikaa | c4afd97a38 | |
| Omar Brikaa | ecdced9ee7 | |
| Omar Brikaa | a99ce9ae47 | |
| Omar Brikaa | bd42fe3357 | |
@@ -92,7 +92,7 @@ jobs:
     docker run -v $(pwd)'/repo:/piston/repo' -v $(pwd)'/packages:/piston/packages' -d --name repo docker.pkg.github.com/engineer-man/piston/repo-builder --no-build
     docker pull docker.pkg.github.com/engineer-man/piston/api
     docker build -t piston-api api
-    docker run --network container:repo -v $(pwd)'/data:/piston' -e PISTON_LOG_LEVEL=DEBUG -e 'PISTON_REPO_URL=http://localhost:8000/index' -d --name api piston-api
+    docker run --privileged --network container:repo -v $(pwd)'/data:/piston' -e PISTON_LOG_LEVEL=DEBUG -e 'PISTON_REPO_URL=http://localhost:8000/index' -d --name api piston-api
     echo Waiting for API to start..
     docker run --network container:api appropriate/curl -s --retry 10 --retry-connrefused http://localhost:2000/api/v2/runtimes
@@ -1,20 +1,29 @@
+FROM buildpack-deps:buster AS isolate
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends git libcap-dev && \
+    rm -rf /var/lib/apt/lists/* && \
+    git clone https://github.com/envicutor/isolate.git /tmp/isolate/ && \
+    cd /tmp/isolate && \
+    git checkout af6db68042c3aa0ded80787fbb78bc0846ea2114 && \
+    make -j$(nproc) install && \
+    rm -rf /tmp/*
+
 FROM node:15.10.0-buster-slim

 ENV DEBIAN_FRONTEND=noninteractive

 RUN dpkg-reconfigure -p critical dash
-RUN for i in $(seq 1001 1500); do \
-    groupadd -g $i runner$i && \
-    useradd -M runner$i -g $i -u $i ; \
-    done
 RUN apt-get update && \
     apt-get install -y libxml2 gnupg tar coreutils util-linux libc6-dev \
     binutils build-essential locales libpcre3-dev libevent-dev libgmp3-dev \
     libncurses6 libncurses5 libedit-dev libseccomp-dev rename procps python3 \
     libreadline-dev libblas-dev liblapack-dev libpcre3-dev libarpack2-dev \
     libfftw3-dev libglpk-dev libqhull-dev libqrupdate-dev libsuitesparse-dev \
-    libsundials-dev libpcre2-dev && \
+    libsundials-dev libpcre2-dev libcap-dev && \
     rm -rf /var/lib/apt/lists/*

+RUN useradd -M piston
+COPY --from=isolate /usr/local/bin/isolate /usr/local/bin
+COPY --from=isolate /usr/local/etc/isolate /usr/local/etc/isolate
+
 RUN sed -i '/en_US.UTF-8/s/^# //g' /etc/locale.gen && locale-gen

@@ -23,7 +32,5 @@ COPY ["package.json", "package-lock.json", "./"]
 RUN npm install
 COPY ./src ./src

-RUN make -C ./src/nosocket/ all && make -C ./src/nosocket/ install
-CMD [ "node", "src"]
+CMD ["/piston_api/src/docker-entrypoint.sh"]

 EXPOSE 2000/tcp
@@ -6,50 +6,9 @@ const events = require('events');
 const runtime = require('../runtime');
 const { Job } = require('../job');
 const package = require('../package');
+const globals = require('../globals');
 const logger = require('logplease').create('api/v2');

-const SIGNALS = [
-    'SIGABRT',
-    'SIGALRM',
-    'SIGBUS',
-    'SIGCHLD',
-    'SIGCLD',
-    'SIGCONT',
-    'SIGEMT',
-    'SIGFPE',
-    'SIGHUP',
-    'SIGILL',
-    'SIGINFO',
-    'SIGINT',
-    'SIGIO',
-    'SIGIOT',
-    'SIGKILL',
-    'SIGLOST',
-    'SIGPIPE',
-    'SIGPOLL',
-    'SIGPROF',
-    'SIGPWR',
-    'SIGQUIT',
-    'SIGSEGV',
-    'SIGSTKFLT',
-    'SIGSTOP',
-    'SIGTSTP',
-    'SIGSYS',
-    'SIGTERM',
-    'SIGTRAP',
-    'SIGTTIN',
-    'SIGTTOU',
-    'SIGUNUSED',
-    'SIGURG',
-    'SIGUSR1',
-    'SIGUSR2',
-    'SIGVTALRM',
-    'SIGXCPU',
-    'SIGXFSZ',
-    'SIGWINCH',
-];
-// ref: https://man7.org/linux/man-pages/man7/signal.7.html
-
 function get_job(body) {
     let {
         language,

@@ -61,6 +20,8 @@ function get_job(body) {
         run_memory_limit,
         run_timeout,
         compile_timeout,
+        run_cpu_time,
+        compile_cpu_time,
     } = body;

     return new Promise((resolve, reject) => {

@@ -106,7 +67,7 @@ function get_job(body) {
         });
     }

-    for (const constraint of ['memory_limit', 'timeout']) {
+    for (const constraint of ['memory_limit', 'timeout', 'cpu_time']) {
         for (const type of ['compile', 'run']) {
             const constraint_name = `${type}_${constraint}`;
             const constraint_value = body[constraint_name];

@@ -135,23 +96,23 @@ function get_job(body) {
         }
     }

-    compile_timeout = compile_timeout || rt.timeouts.compile;
-    run_timeout = run_timeout || rt.timeouts.run;
-    compile_memory_limit = compile_memory_limit || rt.memory_limits.compile;
-    run_memory_limit = run_memory_limit || rt.memory_limits.run;
     resolve(
         new Job({
             runtime: rt,
-            args: args || [],
-            stdin: stdin || '',
+            args: args ?? [],
+            stdin: stdin ?? '',
             files,
             timeouts: {
-                run: run_timeout,
-                compile: compile_timeout,
+                run: run_timeout ?? rt.timeouts.run,
+                compile: compile_timeout ?? rt.timeouts.compile,
+            },
+            cpu_times: {
+                run: run_cpu_time ?? rt.cpu_times.run,
+                compile: compile_cpu_time ?? rt.cpu_times.compile,
             },
             memory_limits: {
-                run: run_memory_limit,
-                compile: compile_memory_limit,
+                run: run_memory_limit ?? rt.memory_limits.run,
+                compile: compile_memory_limit ?? rt.memory_limits.compile,
             },
         })
     );

@@ -211,7 +172,7 @@ router.ws('/connect', async (ws, req) => {
                     job = await get_job(msg);

                     try {
-                        await job.prime();
+                        const box = await job.prime();

                         ws.send(
                             JSON.stringify({

@@ -221,7 +182,7 @@ router.ws('/connect', async (ws, req) => {
                             })
                         );

-                        await job.execute(event_bus);
+                        await job.execute(box, event_bus);
                     } catch (error) {
                         logger.error(
                             `Error cleaning up job: ${job.uuid}:\n${error}`

@@ -248,7 +209,9 @@ router.ws('/connect', async (ws, req) => {
                     break;
                 case 'signal':
                     if (job !== null) {
-                        if (SIGNALS.includes(msg.signal)) {
+                        if (
+                            Object.values(globals.SIGNALS).includes(msg.signal)
+                        ) {
                             event_bus.emit('signal', msg.signal);
                         } else {
                             ws.close(4005, 'Invalid signal');

@@ -279,9 +242,9 @@ router.post('/execute', async (req, res) => {
         return res.status(400).json(error);
     }
     try {
-        await job.prime();
+        const box = await job.prime();

-        let result = await job.execute();
+        let result = await job.execute(box);
         // Backward compatibility when the run stage is not started
         if (result.run === undefined) {
             result.run = result.compile;
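One detail worth noting in the `get_job` changes above: the fallback for user-supplied limits moved from `||` pre-assignments to inline `??` (nullish-coalescing) defaults. A minimal sketch of why that matters (illustrative values, not from the PR):

```js
// `||` treats every falsy value as "missing", so a deliberate run_timeout of 0
// or an stdin of '' would silently be replaced by the runtime default.
// `??` only falls back when the value is null or undefined.
const rt_default = 3000;

console.log(0 || rt_default);         // 3000 -- the caller's 0 is lost
console.log(0 ?? rt_default);         // 0    -- the caller's value is kept
console.log(undefined ?? rt_default); // 3000 -- the fallback still applies
```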
@@ -90,6 +90,18 @@ const options = {
         parser: parse_int,
         validators: [(x, raw) => !is_nan(x) || `${raw} is not a number`],
     },
+    compile_cpu_time: {
+        desc: 'Max CPU time allowed for compile stage in milliseconds',
+        default: 10000, // 10 seconds
+        parser: parse_int,
+        validators: [(x, raw) => !is_nan(x) || `${raw} is not a number`],
+    },
+    run_cpu_time: {
+        desc: 'Max CPU time allowed for run stage in milliseconds',
+        default: 3000, // 3 seconds
+        parser: parse_int,
+        validators: [(x, raw) => !is_nan(x) || `${raw} is not a number`],
+    },
     compile_memory_limit: {
         desc: 'Max memory usage for compile stage in bytes (set to -1 for no limit)',
         default: -1, // no limit

@@ -117,7 +129,7 @@ const options = {
     limit_overrides: {
         desc: 'Per-language exceptions in JSON format for each of:\
         max_process_count, max_open_files, max_file_size, compile_memory_limit,\
-        run_memory_limit, compile_timeout, run_timeout, output_max_size',
+        run_memory_limit, compile_timeout, run_timeout, compile_cpu_time, run_cpu_time, output_max_size',
         default: {},
         parser: parse_overrides,
         validators: [

@@ -165,6 +177,8 @@ function parse_overrides(overrides_string) {
                 'run_memory_limit',
                 'compile_timeout',
                 'run_timeout',
+                'compile_cpu_time',
+                'run_cpu_time',
                 'output_max_size',
             ].includes(key)
         ) {
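With `compile_cpu_time` and `run_cpu_time` added to the whitelist in `parse_overrides`, a per-language override such as the following becomes valid. The values mirror what the zig and dart package metadata later in this PR set; the snippet is only a sketch of the accepted `PISTON_LIMIT_OVERRIDES` format:

```js
// Illustrative PISTON_LIMIT_OVERRIDES value now accepted by parse_overrides.
const overrides_string =
    '{"zig":{"compile_timeout":15000,"compile_cpu_time":15000}}';
const overrides = JSON.parse(overrides_string);
console.log(overrides.zig.compile_cpu_time); // 15000
```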
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+CGROUP_FS="/sys/fs/cgroup"
+if [ ! -e "$CGROUP_FS" ]; then
+    echo "Cannot find $CGROUP_FS. Please make sure your system is using cgroup v2"
+    exit 1
+fi
+
+if [ -e "$CGROUP_FS/unified" ]; then
+    echo "Combined cgroup v1+v2 mode is not supported. Please make sure your system is using pure cgroup v2"
+    exit 1
+fi
+
+if [ ! -e "$CGROUP_FS/cgroup.subtree_control" ]; then
+    echo "Cgroup v2 not found. Please make sure cgroup v2 is enabled on your system"
+    exit 1
+fi
+
+cd /sys/fs/cgroup && \
+mkdir isolate/ && \
+echo 1 > isolate/cgroup.procs && \
+echo '+cpuset +cpu +io +memory +pids' > cgroup.subtree_control && \
+cd isolate && \
+mkdir init && \
+echo 1 > init/cgroup.procs && \
+echo '+cpuset +memory' > cgroup.subtree_control && \
+echo "Initialized cgroup" && \
+chown -R piston:piston /piston && \
+exec su -- piston -c 'ulimit -n 65536 && node /piston_api/src'
@@ -7,14 +7,78 @@ const platform = `${is_docker() ? 'docker' : 'baremetal'}-${fs
     .split('\n')
     .find(x => x.startsWith('ID'))
     .replace('ID=', '')}`;

+const SIGNALS = {
+    1: 'SIGHUP',
+    2: 'SIGINT',
+    3: 'SIGQUIT',
+    4: 'SIGILL',
+    5: 'SIGTRAP',
+    6: 'SIGABRT',
+    7: 'SIGBUS',
+    8: 'SIGFPE',
+    9: 'SIGKILL',
+    10: 'SIGUSR1',
+    11: 'SIGSEGV',
+    12: 'SIGUSR2',
+    13: 'SIGPIPE',
+    14: 'SIGALRM',
+    15: 'SIGTERM',
+    16: 'SIGSTKFLT',
+    17: 'SIGCHLD',
+    18: 'SIGCONT',
+    19: 'SIGSTOP',
+    20: 'SIGTSTP',
+    21: 'SIGTTIN',
+    22: 'SIGTTOU',
+    23: 'SIGURG',
+    24: 'SIGXCPU',
+    25: 'SIGXFSZ',
+    26: 'SIGVTALRM',
+    27: 'SIGPROF',
+    28: 'SIGWINCH',
+    29: 'SIGIO',
+    30: 'SIGPWR',
+    31: 'SIGSYS',
+    34: 'SIGRTMIN',
+    35: 'SIGRTMIN+1',
+    36: 'SIGRTMIN+2',
+    37: 'SIGRTMIN+3',
+    38: 'SIGRTMIN+4',
+    39: 'SIGRTMIN+5',
+    40: 'SIGRTMIN+6',
+    41: 'SIGRTMIN+7',
+    42: 'SIGRTMIN+8',
+    43: 'SIGRTMIN+9',
+    44: 'SIGRTMIN+10',
+    45: 'SIGRTMIN+11',
+    46: 'SIGRTMIN+12',
+    47: 'SIGRTMIN+13',
+    48: 'SIGRTMIN+14',
+    49: 'SIGRTMIN+15',
+    50: 'SIGRTMAX-14',
+    51: 'SIGRTMAX-13',
+    52: 'SIGRTMAX-12',
+    53: 'SIGRTMAX-11',
+    54: 'SIGRTMAX-10',
+    55: 'SIGRTMAX-9',
+    56: 'SIGRTMAX-8',
+    57: 'SIGRTMAX-7',
+    58: 'SIGRTMAX-6',
+    59: 'SIGRTMAX-5',
+    60: 'SIGRTMAX-4',
+    61: 'SIGRTMAX-3',
+    62: 'SIGRTMAX-2',
+    63: 'SIGRTMAX-1',
+    64: 'SIGRTMAX',
+};
+
 module.exports = {
     data_directories: {
         packages: 'packages',
-        jobs: 'jobs',
     },
     version: require('../package.json').version,
     platform,
     pkg_installed_file: '.ppman-installed', //Used as indication for if a package was installed
     clean_directories: ['/dev/shm', '/run/lock', '/tmp', '/var/tmp'],
+    SIGNALS,
 };
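A quick sketch of how the new `globals.SIGNALS` map is consumed elsewhere in this PR (both uses appear in the diffs above and below; the values here are illustrative):

```js
const globals = require('./globals');

// job.js: isolate reports the terminating signal as a number ("exitsig")
// in its metadata file; the API translates it to a name.
const exitsig = 9;
console.log(globals.SIGNALS[exitsig] ?? null); // 'SIGKILL'

// api/v2.js: the WebSocket handler validates client-supplied signal names
// against the same map instead of the old hard-coded SIGNALS array.
console.log(Object.values(globals.SIGNALS).includes('SIGTERM')); // true
```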
@@ -35,10 +35,6 @@ expressWs(app);
             }
         }
     });
-    fss.chmodSync(
-        path.join(config.data_directory, globals.data_directories.jobs),
-        0o711
-    );

     logger.info('Loading packages');
    const pkgdir = path.join(
api/src/job.js (616 changed lines)

@@ -1,13 +1,10 @@
 const logplease = require('logplease');
-const logger = logplease.create('job');
 const { v4: uuidv4 } = require('uuid');
 const cp = require('child_process');
 const path = require('path');
 const config = require('./config');
-const globals = require('./globals');
 const fs = require('fs/promises');
-const fss = require('fs');
-const wait_pid = require('waitpid');
+const globals = require('./globals');

 const job_states = {
     READY: Symbol('Ready to be primed'),

@@ -15,17 +12,26 @@ const job_states = {
     EXECUTED: Symbol('Executed and ready for cleanup'),
 };

-let uid = 0;
-let gid = 0;
+const MAX_BOX_ID = 999;
+const ISOLATE_PATH = '/usr/local/bin/isolate';
+let box_id = 0;

 let remaining_job_spaces = config.max_concurrent_jobs;
 let job_queue = [];

-class Job {
-    #active_timeouts;
-    #active_parent_processes;
+const get_next_box_id = () => ++box_id % MAX_BOX_ID;

-    constructor({ runtime, files, args, stdin, timeouts, memory_limits }) {
+class Job {
+    #dirty_boxes;
+    constructor({
+        runtime,
+        files,
+        args,
+        stdin,
+        timeouts,
+        cpu_times,
+        memory_limits,
+    }) {
         this.uuid = uuidv4();

         this.logger = logplease.create(`job/${this.uuid}`);

@@ -46,29 +52,39 @@ class Job {
             this.stdin += '\n';
         }

-        this.#active_timeouts = [];
-        this.#active_parent_processes = [];
-
         this.timeouts = timeouts;
+        this.cpu_times = cpu_times;
         this.memory_limits = memory_limits;

-        this.uid = config.runner_uid_min + uid;
-        this.gid = config.runner_gid_min + gid;
-
-        uid++;
-        gid++;
-
-        uid %= config.runner_uid_max - config.runner_uid_min + 1;
-        gid %= config.runner_gid_max - config.runner_gid_min + 1;
-
-        this.logger.debug(`Assigned uid=${this.uid} gid=${this.gid}`);
-
         this.state = job_states.READY;
-        this.dir = path.join(
-            config.data_directory,
-            globals.data_directories.jobs,
-            this.uuid
-        );
+        this.#dirty_boxes = [];
+    }
+
+    async #create_isolate_box() {
+        const box_id = get_next_box_id();
+        const metadata_file_path = `/tmp/${box_id}-metadata.txt`;
+        return new Promise((res, rej) => {
+            cp.exec(
+                `isolate --init --cg -b${box_id}`,
+                (error, stdout, stderr) => {
+                    if (error) {
+                        rej(
+                            `Failed to run isolate --init: ${error.message}\nstdout: ${stdout}\nstderr: ${stderr}`
+                        );
+                    }
+                    if (stdout === '') {
+                        rej('Received empty stdout from isolate --init');
+                    }
+                    const box = {
+                        id: box_id,
+                        metadata_file_path,
+                        dir: `${stdout.trim()}/box`,
+                    };
+                    this.#dirty_boxes.push(box);
+                    res(box);
+                }
+            );
+        });
     }

     async prime() {
@@ -80,208 +96,237 @@ class Job {
         }
         this.logger.info(`Priming job`);
         remaining_job_spaces--;
-        this.logger.debug('Writing files to job cache');
-        this.logger.debug(`Transfering ownership`);
-
-        await fs.mkdir(this.dir, { mode: 0o700 });
-        await fs.chown(this.dir, this.uid, this.gid);
+        this.logger.debug('Running isolate --init');
+        const box = await this.#create_isolate_box();

+        this.logger.debug(`Creating submission files in Isolate box`);
+        const submission_dir = path.join(box.dir, 'submission');
+        await fs.mkdir(submission_dir);
         for (const file of this.files) {
-            const file_path = path.join(this.dir, file.name);
-            const rel = path.relative(this.dir, file_path);
-            const file_content = Buffer.from(file.content, file.encoding);
+            const file_path = path.join(submission_dir, file.name);
+            const rel = path.relative(submission_dir, file_path);

             if (rel.startsWith('..'))
                 throw Error(
                     `File path "${file.name}" tries to escape parent directory: ${rel}`
                 );

+            const file_content = Buffer.from(file.content, file.encoding);
+
             await fs.mkdir(path.dirname(file_path), {
                 recursive: true,
                 mode: 0o700,
             });
-            await fs.chown(path.dirname(file_path), this.uid, this.gid);

             await fs.write_file(file_path, file_content);
-            await fs.chown(file_path, this.uid, this.gid);
         }

         this.state = job_states.PRIMED;

         this.logger.debug('Primed job');
+        return box;
     }

-    exit_cleanup() {
-        for (const timeout of this.#active_timeouts) {
-            clear_timeout(timeout);
-        }
-        this.#active_timeouts = [];
-        this.logger.debug('Cleared the active timeouts');
-
-        this.cleanup_processes();
-        this.logger.debug(`Finished exit cleanup`);
-    }
-
-    close_cleanup() {
-        for (const proc of this.#active_parent_processes) {
-            proc.stderr.destroy();
-            if (!proc.stdin.destroyed) {
-                proc.stdin.end();
-                proc.stdin.destroy();
-            }
-            proc.stdout.destroy();
-        }
-        this.#active_parent_processes = [];
-        this.logger.debug('Destroyed processes writables');
-    }
-
-    async safe_call(file, args, timeout, memory_limit, event_bus = null) {
-        return new Promise((resolve, reject) => {
-            const nonetwork = config.disable_networking ? ['nosocket'] : [];
-
-            const prlimit = [
-                'prlimit',
-                '--nproc=' + this.runtime.max_process_count,
-                '--nofile=' + this.runtime.max_open_files,
-                '--fsize=' + this.runtime.max_file_size,
-            ];
-
-            const timeout_call = [
-                'timeout',
-                '-s',
-                '9',
-                Math.ceil(timeout / 1000),
-            ];
-
-            if (memory_limit >= 0) {
-                prlimit.push('--as=' + memory_limit);
-            }
-
-            const proc_call = [
-                'nice',
-                ...timeout_call,
-                ...prlimit,
-                ...nonetwork,
-                'bash',
-                file,
-                ...args,
-            ];
-
-            var stdout = '';
-            var stderr = '';
-            var output = '';
-
-            const proc = cp.spawn(proc_call[0], proc_call.splice(1), {
-                env: {
-                    ...this.runtime.env_vars,
-                    PISTON_LANGUAGE: this.runtime.language,
-                },
-                stdio: 'pipe',
-                cwd: this.dir,
-                uid: this.uid,
-                gid: this.gid,
-                detached: true, //give this process its own process group
-            });
-
-            this.#active_parent_processes.push(proc);
-
-            if (event_bus === null) {
-                proc.stdin.write(this.stdin);
-                proc.stdin.end();
-                proc.stdin.destroy();
-            } else {
-                event_bus.on('stdin', data => {
-                    proc.stdin.write(data);
-                });
-
-                event_bus.on('kill', signal => {
-                    proc.kill(signal);
-                });
-            }
-
-            const kill_timeout =
-                (timeout >= 0 &&
-                    set_timeout(async _ => {
-                        this.logger.info(`Timeout exceeded timeout=${timeout}`);
-                        try {
-                            process.kill(proc.pid, 'SIGKILL');
-                        } catch (e) {
-                            // Could already be dead and just needs to be waited on
-                            this.logger.debug(
-                                `Got error while SIGKILLing process ${proc}:`,
-                                e
-                            );
-                        }
-                    }, timeout)) ||
-                null;
-            this.#active_timeouts.push(kill_timeout);
-
-            proc.stderr.on('data', async data => {
-                if (event_bus !== null) {
-                    event_bus.emit('stderr', data);
-                } else if ((stderr.length + data.length) > this.runtime.output_max_size) {
-                    this.logger.info(`stderr length exceeded`);
-                    try {
-                        process.kill(proc.pid, 'SIGKILL');
-                    } catch (e) {
-                        // Could already be dead and just needs to be waited on
-                        this.logger.debug(
-                            `Got error while SIGKILLing process ${proc}:`,
-                            e
-                        );
-                    }
-                } else {
-                    stderr += data;
-                    output += data;
-                }
-            });
-
-            proc.stdout.on('data', async data => {
-                if (event_bus !== null) {
-                    event_bus.emit('stdout', data);
-                } else if ((stdout.length + data.length) > this.runtime.output_max_size) {
-                    this.logger.info(`stdout length exceeded`);
-                    try {
-                        process.kill(proc.pid, 'SIGKILL');
-                    } catch (e) {
-                        // Could already be dead and just needs to be waited on
-                        this.logger.debug(
-                            `Got error while SIGKILLing process ${proc}:`,
-                            e
-                        );
-                    }
-                } else {
-                    stdout += data;
-                    output += data;
-                }
-            });
-
-            proc.on('exit', () => this.exit_cleanup());
-
-            proc.on('close', (code, signal) => {
-                this.close_cleanup();
-
-                resolve({ stdout, stderr, code, signal, output });
-            });
-
-            proc.on('error', err => {
-                this.exit_cleanup();
-                this.close_cleanup();
-
-                reject({ error: err, stdout, stderr, output });
-            });
-        });
+    async safe_call(
+        box,
+        file,
+        args,
+        timeout,
+        cpu_time,
+        memory_limit,
+        event_bus = null
+    ) {
+        let stdout = '';
+        let stderr = '';
+        let output = '';
+        let memory = null;
+        let code = null;
+        let signal = null;
+        let message = null;
+        let status = null;
+        let cpu_time_stat = null;
+        let wall_time_stat = null;
+
+        const proc = cp.spawn(
+            ISOLATE_PATH,
+            [
+                '--run',
+                `-b${box.id}`,
+                `--meta=${box.metadata_file_path}`,
+                '--cg',
+                '-s',
+                '-c',
+                '/box/submission',
+                '-e',
+                `--dir=${this.runtime.pkgdir}`,
+                `--dir=/etc:noexec`,
+                `--processes=${this.runtime.max_process_count}`,
+                `--open-files=${this.runtime.max_open_files}`,
+                `--fsize=${Math.floor(this.runtime.max_file_size / 1000)}`,
+                `--wall-time=${timeout / 1000}`,
+                `--time=${cpu_time / 1000}`,
+                `--extra-time=0`,
+                ...(memory_limit >= 0
+                    ? [`--cg-mem=${Math.floor(memory_limit / 1000)}`]
+                    : []),
+                ...(config.disable_networking ? [] : ['--share-net']),
+                '--',
+                '/bin/bash',
+                path.join(this.runtime.pkgdir, file),
+                ...args,
+            ],
+            {
+                env: {
+                    ...this.runtime.env_vars,
+                    PISTON_LANGUAGE: this.runtime.language,
+                },
+                stdio: 'pipe',
+            }
+        );
+
+        if (event_bus === null) {
+            proc.stdin.write(this.stdin);
+            proc.stdin.end();
+            proc.stdin.destroy();
+        } else {
+            event_bus.on('stdin', data => {
+                proc.stdin.write(data);
+            });
+
+            event_bus.on('kill', signal => {
+                proc.kill(signal);
+            });
+        }
+
+        proc.stderr.on('data', async data => {
+            if (event_bus !== null) {
+                event_bus.emit('stderr', data);
+            } else if (
+                stderr.length + data.length >
+                this.runtime.output_max_size
+            ) {
+                message = 'stderr length exceeded';
+                status = 'EL';
+                this.logger.info(message);
+                try {
+                    process.kill(proc.pid, 'SIGABRT');
+                } catch (e) {
+                    // Could already be dead and just needs to be waited on
+                    this.logger.debug(
+                        `Got error while SIGABRTing process ${proc}:`,
+                        e
+                    );
+                }
+            } else {
+                stderr += data;
+                output += data;
+            }
+        });
+
+        proc.stdout.on('data', async data => {
+            if (event_bus !== null) {
+                event_bus.emit('stdout', data);
+            } else if (
+                stdout.length + data.length >
+                this.runtime.output_max_size
+            ) {
+                message = 'stdout length exceeded';
+                status = 'OL';
+                this.logger.info(message);
+                try {
+                    process.kill(proc.pid, 'SIGABRT');
+                } catch (e) {
+                    // Could already be dead and just needs to be waited on
+                    this.logger.debug(
+                        `Got error while SIGABRTing process ${proc}:`,
+                        e
+                    );
+                }
+            } else {
+                stdout += data;
+                output += data;
+            }
+        });
+
+        const data = await new Promise((res, rej) => {
+            proc.on('exit', (_, signal) => {
+                res({
+                    signal,
+                });
+            });
+
+            proc.on('error', err => {
+                rej({
+                    error: err,
+                });
+            });
+        });
+
+        try {
+            const metadata_str = (
+                await fs.read_file(box.metadata_file_path)
+            ).toString();
+            const metadata_lines = metadata_str.split('\n');
+            for (const line of metadata_lines) {
+                if (!line) continue;
+
+                const [key, value] = line.split(':');
+                if (key === undefined || value === undefined) {
+                    throw new Error(
+                        `Failed to parse metadata file, received: ${line}`
+                    );
+                }
+                switch (key) {
+                    case 'cg-mem':
+                        memory = parse_int(value) * 1000;
+                        break;
+                    case 'exitcode':
+                        code = parse_int(value);
+                        break;
+                    case 'exitsig':
+                        signal = globals.SIGNALS[parse_int(value)] ?? null;
+                        break;
+                    case 'message':
+                        message = message || value;
+                        break;
+                    case 'status':
+                        status = status || value;
+                        break;
+                    case 'time':
+                        cpu_time_stat = parse_float(value) * 1000;
+                        break;
+                    case 'time-wall':
+                        wall_time_stat = parse_float(value) * 1000;
+                        break;
+                    default:
+                        break;
+                }
+            }
+        } catch (e) {
+            throw new Error(
+                `Error reading metadata file: ${box.metadata_file_path}\nError: ${e.message}\nIsolate run stdout: ${stdout}\nIsolate run stderr: ${stderr}`
+            );
+        }
+
+        return {
+            ...data,
+            stdout,
+            stderr,
+            code,
+            signal: ['TO', 'OL', 'EL'].includes(status) ? 'SIGKILL' : signal,
+            output,
+            memory,
+            message,
+            status,
+            cpu_time: cpu_time_stat,
+            wall_time: wall_time_stat,
+        };
     }

-    async execute(event_bus = null) {
+    async execute(box, event_bus = null) {
         if (this.state !== job_states.PRIMED) {
             throw new Error(
                 'Job must be in primed state, current state: ' +
                     this.state.toString()
             );
         }
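The metadata file that `safe_call` now parses is isolate's `--meta` output: one `key:value` pair per line. A self-contained sketch of the parsing above, run against a sample file (the sample values are invented; the keys and units match what the code expects):

```js
// Standalone sketch of the metadata parsing in safe_call above.
const sample = [
    'time:0.008',      // CPU time, seconds
    'time-wall:0.154', // wall time, seconds
    'cg-mem:1160',     // peak cgroup memory, kB
    'exitcode:0',
].join('\n');

let memory = null, code = null, cpu_time = null, wall_time = null;
for (const line of sample.split('\n')) {
    if (!line) continue;
    const [key, value] = line.split(':');
    switch (key) {
        case 'cg-mem':    memory = parseInt(value) * 1000; break;     // kB -> bytes (approx.)
        case 'exitcode':  code = parseInt(value); break;
        case 'time':      cpu_time = parseFloat(value) * 1000; break; // s -> ms
        case 'time-wall': wall_time = parseFloat(value) * 1000; break;
    }
}
console.log({ memory, code, cpu_time, wall_time });
// { memory: 1160000, code: 0, cpu_time: 8, wall_time: 154 }
```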
@@ -298,49 +343,61 @@ class Job {
         const { emit_event_bus_result, emit_event_bus_stage } =
             event_bus === null
                 ? {
-                      emit_event_bus_result: () => { },
-                      emit_event_bus_stage: () => { },
+                      emit_event_bus_result: () => {},
+                      emit_event_bus_stage: () => {},
                   }
                 : {
-                      emit_event_bus_result: (stage, result, event_bus) => {
+                      emit_event_bus_result: (stage, result) => {
                           const { error, code, signal } = result;
                           event_bus.emit('exit', stage, {
                               error,
                               code,
                               signal,
                           });
                       },
-                      emit_event_bus_stage: (stage, event_bus) => {
+                      emit_event_bus_stage: stage => {
                           event_bus.emit('stage', stage);
                       },
                   };

         if (this.runtime.compiled) {
             this.logger.debug('Compiling');
-            emit_event_bus_stage('compile', event_bus);
+            emit_event_bus_stage('compile');
             compile = await this.safe_call(
-                path.join(this.runtime.pkgdir, 'compile'),
+                box,
+                'compile',
                 code_files.map(x => x.name),
                 this.timeouts.compile,
+                this.cpu_times.compile,
                 this.memory_limits.compile,
                 event_bus
             );
-            emit_event_bus_result('compile', compile, event_bus);
+            emit_event_bus_result('compile', compile);
             compile_errored = compile.code !== 0;
+            if (!compile_errored) {
+                const old_box_dir = box.dir;
+                box = await this.#create_isolate_box();
+                await fs.rename(
+                    path.join(old_box_dir, 'submission'),
+                    path.join(box.dir, 'submission')
+                );
+            }
         }

         let run;
         if (!compile_errored) {
             this.logger.debug('Running');
-            emit_event_bus_stage('run', event_bus);
+            emit_event_bus_stage('run');
             run = await this.safe_call(
-                path.join(this.runtime.pkgdir, 'run'),
+                box,
+                'run',
                 [code_files[0].name, ...this.args],
                 this.timeouts.run,
+                this.cpu_times.run,
                 this.memory_limits.run,
                 event_bus
             );
-            emit_event_bus_result('run', run, event_bus);
+            emit_event_bus_result('run', run);
         }

         this.state = job_states.EXECUTED;
@@ -353,139 +410,34 @@ class Job {
         };
     }

-    cleanup_processes(dont_wait = []) {
-        let processes = [1];
-        const to_wait = [];
-        this.logger.debug(`Cleaning up processes`);
-
-        while (processes.length > 0) {
-            processes = [];
-
-            const proc_ids = fss.readdir_sync('/proc');
-
-            processes = proc_ids.map(proc_id => {
-                if (isNaN(proc_id)) return -1;
-                try {
-                    const proc_status = fss.read_file_sync(
-                        path.join('/proc', proc_id, 'status')
-                    );
-                    const proc_lines = proc_status.to_string().split('\n');
-                    const state_line = proc_lines.find(line =>
-                        line.starts_with('State:')
-                    );
-                    const uid_line = proc_lines.find(line =>
-                        line.starts_with('Uid:')
-                    );
-                    const [_, ruid, euid, suid, fuid] = uid_line.split(/\s+/);
-
-                    const [_1, state, user_friendly] = state_line.split(/\s+/);
-
-                    const proc_id_int = parse_int(proc_id);
-
-                    // Skip over any processes that aren't ours.
-                    if (ruid != this.uid && euid != this.uid) return -1;
-
-                    if (state == 'Z') {
-                        // Zombie process, just needs to be waited, regardless of the user id
-                        if (!to_wait.includes(proc_id_int))
-                            to_wait.push(proc_id_int);
-
-                        return -1;
-                    }
-                    // We should kill in all other state (Sleep, Stopped & Running)
-
-                    return proc_id_int;
-                } catch {
-                    return -1;
-                }
-
-                return -1;
-            });
-
-            processes = processes.filter(p => p > 0);
-
-            if (processes.length > 0)
-                this.logger.debug(`Got processes to kill: ${processes}`);
-
-            for (const proc of processes) {
-                // First stop the processes, but keep their resources allocated so they cant re-fork
-                try {
-                    process.kill(proc, 'SIGSTOP');
-                } catch (e) {
-                    // Could already be dead
-                    this.logger.debug(
-                        `Got error while SIGSTOPping process ${proc}:`,
-                        e
-                    );
-                }
-            }
-
-            for (const proc of processes) {
-                // Then clear them out of the process tree
-                try {
-                    process.kill(proc, 'SIGKILL');
-                } catch (e) {
-                    // Could already be dead and just needs to be waited on
-                    this.logger.debug(
-                        `Got error while SIGKILLing process ${proc}:`,
-                        e
-                    );
-                }
-
-                to_wait.push(proc);
-            }
-        }
-
-        this.logger.debug(
-            `Finished kill-loop, calling wait_pid to end any zombie processes`
-        );
-
-        for (const proc of to_wait) {
-            if (dont_wait.includes(proc)) continue;
-
-            wait_pid(proc);
-        }
-
-        this.logger.debug(`Cleaned up processes`);
-    }
-
-    async cleanup_filesystem() {
-        for (const clean_path of globals.clean_directories) {
-            const contents = await fs.readdir(clean_path);
-
-            for (const file of contents) {
-                const file_path = path.join(clean_path, file);
-
-                try {
-                    const stat = await fs.stat(file_path);
-
-                    if (stat.uid === this.uid) {
-                        await fs.rm(file_path, {
-                            recursive: true,
-                            force: true,
-                        });
-                    }
-                } catch (e) {
-                    // File was somehow deleted in the time that we read the dir to when we checked the file
-                    this.logger.warn(`Error removing file ${file_path}: ${e}`);
-                }
-            }
-        }
-
-        await fs.rm(this.dir, { recursive: true, force: true });
-    }
-
     async cleanup() {
         this.logger.info(`Cleaning up job`);

-        this.exit_cleanup(); // Run process janitor, just incase there are any residual processes somehow
-        this.close_cleanup();
-        await this.cleanup_filesystem();
-
         remaining_job_spaces++;
         if (job_queue.length > 0) {
             job_queue.shift()();
         }
+        await Promise.all(
+            this.#dirty_boxes.map(async box => {
+                cp.exec(
+                    `isolate --cleanup --cg -b${box.id}`,
+                    (error, stdout, stderr) => {
+                        if (error) {
+                            this.logger.error(
+                                `Failed to run isolate --cleanup: ${error.message} on box #${box.id}\nstdout: ${stdout}\nstderr: ${stderr}`
+                            );
+                        }
+                    }
+                );
+                try {
+                    await fs.rm(box.metadata_file_path);
+                } catch (e) {
+                    this.logger.error(
+                        `Failed to remove the metadata directory of box #${box.id}. Error: ${e.message}`
+                    );
+                }
+            })
+        );
     }
 }
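Taken together, the new box lifecycle that both endpoints follow is: `prime()` initializes a box and returns it, `execute(box)` runs inside it (allocating a fresh box between the compile and run stages), and `cleanup()` tears down every box the job dirtied. A hedged sketch of the calling pattern, mirroring the `/api/v2/execute` handler (error handling trimmed):

```js
// Sketch of the calling convention introduced by this PR.
async function run_job(job) {
    const box = await job.prime();     // isolate --init; files land in <box>/submission
    try {
        return await job.execute(box); // compile + run via isolate --run
    } finally {
        await job.cleanup();           // isolate --cleanup for every dirty box
    }
}
```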
@@ -1,19 +0,0 @@
-CC = gcc
-CFLAGS = -O2 -Wall -lseccomp
-TARGET = nosocket
-BUILD_PATH = ./
-INSTALL_PATH = /usr/local/bin/
-SOURCE = nosocket.c
-
-all: $(TARGET)
-
-$(TARGET): $(SOURCE)
-	$(CC) $(BUILD_PATH)$(SOURCE) $(CFLAGS) -o $(TARGET)
-
-install:
-	mv $(TARGET) $(INSTALL_PATH)
-
-clean:
-	$(RM) $(TARGET)
-	$(RM) $(INSTALL_PATH)$(TARGET)
@@ -1,62 +0,0 @@
-/*
-nosocket.c
-
-Disables access to the `socket` syscall and runs a program provided as the first
-commandline argument.
-*/
-#include <stdio.h>
-#include <errno.h>
-#include <unistd.h>
-#include <sys/prctl.h>
-#include <seccomp.h>
-
-int main(int argc, char *argv[])
-{
-    // Disallow any new capabilities from being added
-    prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
-
-    // SCMP_ACT_ALLOW lets the filter have no effect on syscalls not matching a
-    // configured filter rule (allow all by default)
-    scmp_filter_ctx ctx = seccomp_init(SCMP_ACT_ALLOW);
-    if (!ctx)
-    {
-        fprintf(stderr, "Unable to initialize seccomp filter context\n");
-        return 1;
-    }
-
-    // Add 32 bit and 64 bit architectures to seccomp filter
-    int rc;
-    uint32_t arch[] = {SCMP_ARCH_X86_64, SCMP_ARCH_X86, SCMP_ARCH_X32};
-    // We first remove the existing arch, otherwise our subsequent call to add
-    // it will fail
-    seccomp_arch_remove(ctx, seccomp_arch_native());
-    for (int i = 0; i < sizeof(arch) / sizeof(arch[0]); i++)
-    {
-        rc = seccomp_arch_add(ctx, arch[i]);
-        if (rc != 0)
-        {
-            fprintf(stderr, "Unable to add arch: %d\n", arch[i]);
-            return 1;
-        }
-    }
-
-    // Add a seccomp rule to the syscall blacklist - blacklist the socket syscall
-    if (seccomp_rule_add(ctx, SCMP_ACT_ERRNO(EACCES), SCMP_SYS(socket), 0) < 0)
-    {
-        fprintf(stderr, "Unable to add seccomp rule to context\n");
-        return 1;
-    }
-
-#ifdef DEBUG
-    seccomp_export_pfc(ctx, 0);
-#endif
-
-    if (argc < 2)
-    {
-        fprintf(stderr, "Usage %s: %s <program name> <arguments>\n", argv[0], argv[0]);
-        return 1;
-    }
-    seccomp_load(ctx);
-    execvp(argv[1], argv + 1);
-    return 1;
-}
@@ -145,7 +145,11 @@ class Package {
         await fs.write_file(path.join(this.install_path, '.env'), filtered_env);

         logger.debug('Changing Ownership of package directory');
-        await util.promisify(chownr)(this.install_path, 0, 0);
+        await util.promisify(chownr)(
+            this.install_path,
+            process.getuid(),
+            process.getgid()
+        );

         logger.debug('Writing installed state to disk');
         await fs.write_file(
@@ -15,6 +15,7 @@ class Runtime {
         pkgdir,
         runtime,
         timeouts,
+        cpu_times,
         memory_limits,
         max_process_count,
         max_open_files,

@@ -27,6 +28,7 @@ class Runtime {
         this.pkgdir = pkgdir;
         this.runtime = runtime;
         this.timeouts = timeouts;
+        this.cpu_times = cpu_times;
         this.memory_limits = memory_limits;
         this.max_process_count = max_process_count;
         this.max_open_files = max_open_files;

@@ -62,6 +64,18 @@ class Runtime {
                 language_limit_overrides
             ),
         },
+        cpu_times: {
+            compile: this.compute_single_limit(
+                language_name,
+                'compile_cpu_time',
+                language_limit_overrides
+            ),
+            run: this.compute_single_limit(
+                language_name,
+                'run_cpu_time',
+                language_limit_overrides
+            ),
+        },
         memory_limits: {
             compile: this.compute_single_limit(
                 language_name,
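The two new `cpu_times` entries reuse the pre-existing `compute_single_limit` helper, so per-language `limit_overrides` take precedence over the global defaults. A sketch of the effective lookup for one limit; the helper name is real, but this inline logic is only an illustration of the precedence, not its actual implementation:

```js
// Illustration: a language-level override beats the global config default.
function resolve_limit(limit_name, language_limit_overrides, config) {
    return language_limit_overrides?.[limit_name] ?? config[limit_name];
}

console.log(resolve_limit('run_cpu_time', { run_cpu_time: 5000 }, { run_cpu_time: 3000 })); // 5000
console.log(resolve_limit('run_cpu_time', {}, { run_cpu_time: 3000 }));                     // 3000
```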
@@ -23,8 +23,8 @@ fetch_packages(){
     mkdir build
     # Start a piston container
     docker run \
+        --privileged \
         -v "$PWD/build":'/piston/packages' \
-        --tmpfs /piston/jobs \
         -dit \
         -p $port:2000 \
         --name builder_piston_instance \

@@ -61,4 +61,4 @@ fetch_packages $SPEC_FILE
 build_container $TAG

 echo "Start your custom piston container with"
-echo "$ docker run --tmpfs /piston/jobs -dit -p 2000:2000 $TAG"
+echo "$ docker run --privileged -dit -p 2000:2000 $TAG"
@@ -4,8 +4,7 @@ services:
     api:
         build: api
         container_name: piston_api
-        cap_add:
-            - CAP_SYS_ADMIN
+        privileged: true
         restart: always
         ports:
             - 2000:2000

@@ -13,8 +12,6 @@ services:
             - ./data/piston/packages:/piston/packages
         environment:
             - PISTON_REPO_URL=http://repo:8000/index
-        tmpfs:
-            - /piston/jobs:exec,uid=1000,gid=1000,mode=711

     repo: # Local testing of packages
         build: repo
@@ -5,10 +5,10 @@ services:
         image: ghcr.io/engineer-man/piston
         container_name: piston_api
         restart: always
+        privileged: true
         ports:
             - 2000:2000
         volumes:
             - ./data/piston/packages:/piston/packages
         tmpfs:
-            - /piston/jobs:exec,uid=1000,gid=1000,mode=711
             - /tmp:exec
@@ -135,8 +135,21 @@ key:
 default: 3000
 ```

-The maximum time that is allowed to be taken by a stage in milliseconds.
-Use -1 for unlimited time.
+The maximum time that is allowed to be taken by a stage in milliseconds. This is the wall-time of the stage. The time that the CPU does not spend working on the stage (e.g., due to context switches or IO) is counted.
+
+## Compile/Run CPU-Time
+
+```yaml
+key:
+    - PISTON_COMPILE_CPU_TIME
+default: 10000
+
+key:
+    - PISTON_RUN_CPU_TIME
+default: 3000
+```
+
+The maximum CPU-time that is allowed to be consumed by a stage in milliseconds. The time that the CPU does not spend working on the stage (e.g., IO and context switches) is not counted. This option is typically used in algorithm contests.

 ## Compile/Run memory limits

@@ -178,7 +191,7 @@ default: {}
 ```

 Per-language overrides/exceptions for each of `max_process_count`, `max_open_files`, `max_file_size`,
-`compile_memory_limit`, `run_memory_limit`, `compile_timeout`, `run_timeout`, `output_max_size`. Defined as follows:
+`compile_memory_limit`, `run_memory_limit`, `compile_timeout`, `run_timeout`, `compile_cpu_time`, `run_cpu_time`, `output_max_size`. Defined as follows:

 ```
 PISTON_LIMIT_OVERRIDES={"c++":{"max_process_count":128}}
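To see why both limits exist, note that they bound different clocks: a program that sleeps accrues wall-time but almost no CPU-time, while a busy loop accrues both at roughly the same rate. A small illustration (JavaScript, not from the docs):

```js
// A sleeper burns ~5000 ms of wall-time but ~0 ms of CPU-time,
// so only the wall-time limit (run_timeout) can stop it.
const sleeper = () => new Promise(res => setTimeout(res, 5000));

// A busy loop burns CPU-time about as fast as wall-time,
// so the CPU-time limit (run_cpu_time) trips first.
const spinner = () => {
    const end = Date.now() + 5000;
    while (Date.now() < end) {} // spin
};
```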
@@ -3,6 +3,7 @@
     "version": "0.10.1",
     "aliases": [],
     "limit_overrides": {
-        "compile_timeout": 15000
+        "compile_timeout": 15000,
+        "compile_cpu_time": 15000
     }
 }
@@ -3,6 +3,7 @@
     "version": "0.8.0",
     "aliases": ["zig"],
     "limit_overrides": {
-        "compile_timeout": 15000
+        "compile_timeout": 15000,
+        "compile_cpu_time": 15000
     }
 }

@@ -3,6 +3,7 @@
     "version": "0.9.1",
     "aliases": ["zig"],
     "limit_overrides": {
-        "compile_timeout": 15000
+        "compile_timeout": 15000,
+        "compile_cpu_time": 15000
     }
 }
readme.md (87 changed lines)

@@ -104,7 +104,8 @@ POST https://emkc.org/api/v2/piston/execute

 - Docker
 - Docker Compose
-- Node JS (>= 13, preferably >= 15)
+- Node JS (>= 15)
+- cgroup v2 enabled, and cgroup v1 disabled

 ### After system dependencies are installed, clone this repository:

@@ -113,6 +114,10 @@ POST https://emkc.org/api/v2/piston/execute
 git clone https://github.com/engineer-man/piston
 ```

+> [!NOTE]
+>
+> Ensure the repository is cloned with LF line endings
+
 ### Installation

 ```sh
@@ -135,8 +140,8 @@ The API will now be online with no language runtimes installed. To install runtimes:

 docker run \
+    --privileged \
     -v $PWD:'/piston' \
-    --tmpfs /piston/jobs \
     -dit \
     -p 2000:2000 \
     --name piston_api \
@@ -245,8 +250,10 @@ This endpoint requests execution of some arbitrary code.

 - `files[].encoding` (_optional_) The encoding scheme used for the file content. One of `base64`, `hex` or `utf8`. Defaults to `utf8`.
 - `stdin` (_optional_) The text to pass as stdin to the program. Must be a string or left out. Defaults to blank string.
 - `args` (_optional_) The arguments to pass to the program. Must be an array or left out. Defaults to `[]`.
-- `compile_timeout` (_optional_) The maximum time allowed for the compile stage to finish before bailing out in milliseconds. Must be a number or left out. Defaults to `10000` (10 seconds).
-- `run_timeout` (_optional_) The maximum time allowed for the run stage to finish before bailing out in milliseconds. Must be a number or left out. Defaults to `3000` (3 seconds).
+- `compile_timeout` (_optional_) The maximum wall-time allowed for the compile stage to finish before bailing out in milliseconds. Must be a number or left out. Defaults to `10000` (10 seconds).
+- `run_timeout` (_optional_) The maximum wall-time allowed for the run stage to finish before bailing out in milliseconds. Must be a number or left out. Defaults to `3000` (3 seconds).
+- `compile_cpu_time` (_optional_) The maximum CPU-time allowed for the compile stage to finish before bailing out in milliseconds. Must be a number or left out. Defaults to `10000` (10 seconds).
+- `run_cpu_time` (_optional_) The maximum CPU-time allowed for the run stage to finish before bailing out in milliseconds. Must be a number or left out. Defaults to `3000` (3 seconds).
 - `compile_memory_limit` (_optional_) The maximum amount of memory the compile stage is allowed to use in bytes. Must be a number or left out. Defaults to `-1` (no limit)
 - `run_memory_limit` (_optional_) The maximum amount of memory the run stage is allowed to use in bytes. Must be a number or left out. Defaults to `-1` (no limit)

@@ -264,6 +271,8 @@ This endpoint requests execution of some arbitrary code.
     "args": ["1", "2", "3"],
     "compile_timeout": 10000,
     "run_timeout": 3000,
+    "compile_cpu_time": 10000,
+    "run_cpu_time": 3000,
     "compile_memory_limit": -1,
     "run_memory_limit": -1
 }
|
||||||
`compile` will only be present if the language requested requires a compile stage.
|
`compile` will only be present if the language requested requires a compile stage.
|
||||||
|
|
||||||
Each of these keys has an identical structure, containing both a `stdout` and `stderr` key, which is a string containing the text outputted during the stage into each buffer.
|
Each of these keys has an identical structure, containing both a `stdout` and `stderr` key, which is a string containing the text outputted during the stage into each buffer.
|
||||||
It also contains the `code` and `signal` which was returned from each process.
|
It also contains the `code` and `signal` which was returned from each process. It also includes a nullable human-readable `message` which is a description of why a stage has failed and a two-letter `status` that is either:
|
||||||
|
|
||||||
|
- `RE` for runtime error
|
||||||
|
- `SG` for dying on a signal
|
||||||
|
- `TO` for timeout (either via `timeout` or `cpu_time`)
|
||||||
|
- `OL` for stdout length exceeded
|
||||||
|
- `EL` for stderr length exceeded
|
||||||
|
- `XX` for internal error
|
||||||
|
|
||||||
```json
|
```json
|
||||||
HTTP/1.1 200 OK
|
HTTP/1.1 200 OK
|
||||||
|
@ -287,7 +303,12 @@ Content-Type: application/json
|
||||||
"stderr": "",
|
"stderr": "",
|
||||||
"output": "[\n '/piston/packages/node/15.10.0/bin/node',\n '/piston/jobs/9501b09d-0105-496b-b61a-e5148cf66384/my_cool_code.js',\n '1',\n '2',\n '3'\n]\n",
|
"output": "[\n '/piston/packages/node/15.10.0/bin/node',\n '/piston/jobs/9501b09d-0105-496b-b61a-e5148cf66384/my_cool_code.js',\n '1',\n '2',\n '3'\n]\n",
|
||||||
"code": 0,
|
"code": 0,
|
||||||
"signal": null
|
"signal": null,
|
||||||
|
"message": null,
|
||||||
|
"status": null,
|
||||||
|
"cpu_time": 8,
|
||||||
|
"wall_time": 154,
|
||||||
|
"memory": 1160000
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
@@ -303,6 +324,40 @@ Content-Type: application/json
 }
 ```

+#### Interactive execution endpoint (not available through the public API)
+
+To interact with running processes in real time, you can establish a WebSocket connection at `/api/v2/connect`. This allows you to both receive output and send input to active processes.
+
+Each message is structured as a JSON object with a `type` key, which indicates the action to perform. Below is a list of message types, their directions, and descriptions:
+
+- **init** (client -> server): Initializes a job with the same parameters as the `/execute` endpoint, except that stdin is discarded.
+- **runtime** (server -> client): Provides details on the runtime environment, including the version and language.
+- **stage** (server -> client): Indicates the current execution stage, either "compile" or "run."
+- **data** (server <-> client): Exchanges data between the client and server, such as stdin, stdout, or stderr streams.
+- **signal** (client -> server): Sends a signal (e.g., for termination) to the running process, whether it's in the "compile" or "run" stage.
+- **exit** (server -> client): Signals the end of a stage, along with the exit code or signal.
+- **error** (server -> client): Reports an error, typically right before the WebSocket is closed.
+
+An example of this endpoint in use is depicted below (**<** = client to server, **>** = server to client); a client sketch follows after the list of error codes.
+
+1. Client establishes WebSocket connection to `/api/v2/connect`
+2. **<** `{"type":"init", "language":"bash", "version":"*", "files":[{"content": "cat"}]}`
+3. **>** `{"type":"runtime","language": "bash", "version": "5.1.0"}`
+4. **>** `{"type":"stage", "stage":"run"}`
+5. **<** `{"type":"data", "stream":"stdin", "data":"Hello World!"}`
+6. **>** `{"type":"data", "stream":"stdout", "data":"Hello World!"}`
+7. _time passes_
+8. **>** `{"type":"exit", "stage":"run", "code":null, "signal": "SIGKILL"}`
+
+Errors may return status codes as follows:
+
+- **4000: Already Initialized**: Sent when a second `init` command is issued.
+- **4001: Initialization Timeout**: No `init` command was sent within 1 second of connection.
+- **4002: Notified Error**: A fatal error occurred, and an `error` packet was transmitted.
+- **4003: Not yet Initialized**: A non-`init` command was sent without a job context.
+- **4004: Can only write to stdin**: The client attempted to write to a stream other than stdin.
+- **4005: Invalid Signal**: An invalid signal was sent in a `signal` packet.
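A hedged sketch of a client that drives the exchange above (assumes the `ws` npm package; the payloads follow the message types just listed):

```js
const WebSocket = require('ws');
const socket = new WebSocket('ws://localhost:2000/api/v2/connect');

socket.on('open', () => {
    // Must arrive within 1 second of connecting (see code 4001 above).
    socket.send(JSON.stringify({
        type: 'init',
        language: 'bash',
        version: '*',
        files: [{ content: 'cat' }],
    }));
});

socket.on('message', raw => {
    const msg = JSON.parse(raw);
    if (msg.type === 'stage' && msg.stage === 'run') {
        // The run stage has started: write to the process's stdin.
        socket.send(JSON.stringify({ type: 'data', stream: 'stdin', data: 'Hello World!' }));
    } else if (msg.type === 'data') {
        process.stdout.write(`${msg.stream}: ${msg.data}`);
    } else if (msg.type === 'exit') {
        socket.close();
    }
});
```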
<br>

# Supported Languages
@@ -396,26 +451,26 @@ Content-Type: application/json

 # Principle of Operation

-Piston uses Docker as the primary mechanism for sandboxing. There is an API within the container written in Node
-which takes in execution requests and executees them within the container safely.
-High level, the API writes any source code to a temporary directory in `/piston/jobs`.
+Piston uses [Isolate](https://www.ucw.cz/moe/isolate.1.html) inside Docker as the primary mechanism for sandboxing. There is an API within the container written in Node
+which takes in execution requests and executes them within the container safely.
+High level, the API writes any source code and executes it inside an Isolate sandbox.
 The source file is either ran or compiled and ran (in the case of languages like c, c++, c#, go, etc.).

 <br>

 # Security

-Docker provides a great deal of security out of the box in that it's separate from the system.
-Piston takes additional steps to make it resistant to
-various privilege escalation, denial-of-service, and resource saturation threats. These steps include:
+Piston uses Isolate which makes use of Linux namespaces, chroot, multiple unprivileged users, and cgroup for sandboxing and resource limiting. Code execution submissions on Piston shall not be aware of each other, shall not affect each other and shall not affect the underlying host system. This is ensured through multiple steps including:

-- Disabling outgoing network interaction
+- Disabling outgoing network interaction by default
 - Capping max processes at 256 by default (resists `:(){ :|: &}:;`, `while True: os.fork()`, etc.)
 - Capping max files at 2048 (resists various file based attacks)
 - Cleaning up all temp space after each execution (resists out of drive space attacks)
-- Running as a variety of unprivileged users
-- Capping runtime execution at 3 seconds
-- Capping stdout to 65536 characters (resists yes/no bombs and runaway output)
+- Running each submission as a different unprivileged user
+- Running each submission with its own isolated Linux namespaces
+- Capping runtime execution at 3 seconds by default (CPU-time and wall-time)
+- Capping the peak memory that all the submission's processes can use
+- Capping stdout to 1024 characters by default (resists yes/no bombs and runaway output)
 - SIGKILLing misbehaving code

 <br>