ts-0.7.4/0000700000175000017500000000000012240114520011137 5ustar viricviricts-0.7.4/README0000600000175000017500000000061212240114503012021 0ustar viricviricUsers ------------------------ If you are in a system which understands POSIX and has the GNU toolkit, you probably can run: make make install If you want to install to another path than /usr/local, you can run: make install PREFIX=/usr Developers ------------------------ Run ". setenv" before adding bugs to the database. Use 'bug' for the database. http://freshmeat.net/projects/bug/ ts-0.7.4/env.c0000600000175000017500000000373012240114503012101 0ustar viricviric/* Task Spooler - a task queue system for the unix user Copyright (C) 2007-2009 Lluís Batlle i Rossell Please find the license in the provided COPYING file. */ #include #include #include #include #include #include #include #include #include "main.h" static int fork_command(const char *command) { int pid; int p[2]; int fdnull; fdnull = open("/dev/null", O_RDONLY); if (fdnull == -1) error("Cannot open /dev/null"); pipe(p); pid = fork(); switch(pid) { case 0: restore_sigmask(); close(server_socket); dup2(fdnull, 0); if (fdnull != 0) close(fdnull); dup2(p[1], 1); dup2(p[1], 2); if (p[1] != 1 && p[1] != 2) close(p[1]); close(p[0]); execlp("/bin/sh", "/bin/sh", "-c", command, 0); error("/bin/sh exec error"); case -1: error("Fork error"); default: close(p[1]); } return p[0]; } char * get_environment() { char *ptr; char *command; int readfd; int bytes = 0; int alloc = 0; command = getenv("TS_ENV"); if (command == 0) return 0; readfd = fork_command(command); ptr = 0; do { int next; int res; next = bytes + 1000; if (next > alloc) { ptr = realloc(ptr, next); alloc = next; } res = read(readfd, ptr + bytes, 1000); if (res < 0) error("Cannot read from the TS_ENV command (%s)", command); else if (res == 0) break; else bytes += res; } while(1); /* We will always have 1000 bytes more to be written, on end. * We add a null for it to be an ASCIIZ string. */ ptr[bytes] = '\0'; close(readfd); wait(0); return ptr; } ts-0.7.4/PORTABILITY0000600000175000017500000000127212240114503012671 0ustar viricviricANSI C and POSIX ------------------------- The code should compile without any warning in a system providing an ANSI C compiler and a Single Unix Specification rev 5 library. I tried only with glibc. If you find any inconsistency between the code and this statement, please let know the auther. Compilers ------------------------- (Lluís) I could compile easily with gcc. I could also compile with tcc 0.9.23 if I change the features to -D_XOPEN_SOURCE -D_XOPEN_SOURCE_EXTENDED -D__STRICT_ANSI__ . HP-UX 10.2 in PA-RISC ------------------------- (Lluís) I could compile ts 0.3 changing the #include for sys/select.h in server.c to sys/time.h. I also had to use gcc instead of the system's cc. ts-0.7.4/print.c0000600000175000017500000000200712240114503012441 0ustar viricviric/* Task Spooler - a task queue system for the unix user Copyright (C) 2007-2009 Lluís Batlle i Rossell Please find the license in the provided COPYING file. */ #include #include #include #include #include #include #include "main.h" /* maxsize: max buffer size, with '\0' */ int fd_nprintf(int fd, int maxsize, const char *fmt, ...) 
{ va_list ap; char *out; int size; int rest; va_start(ap, fmt); out = (char *) malloc(maxsize * sizeof(char)); if (out == 0) { warning("Not enough memory in fd_nprintf()"); return -1; } size = vsnprintf(out, maxsize, fmt, ap); rest = size; /* We don't want the last null character */ while (rest > 0) { int res; res = write(fd, out, rest); if (res == -1) { warning("Cannot write more chars in pinfo_dump"); break; } rest -= res; } free(out); return size; } ts-0.7.4/deb/0000700000175000017500000000000012240114503011672 5ustar viricviricts-0.7.4/deb/pkg/0000700000175000017500000000000012240114503012453 5ustar viricviricts-0.7.4/deb/pkg/DEBIAN/0000700000175000017500000000000012240114503013375 5ustar viricviricts-0.7.4/deb/pkg/DEBIAN/control0000600000175000017500000000124212240114503015001 0ustar viricviricPackage: ts Version: 0.5.4 Section: base Priority: optional Architecture: i386 Essential: no Installed-Size: 100 Maintainer: Lluis Batlle Depends: coreutils Conflicts: moreutils Description: task spooler is a Unix batch system where the tasks spooled run one after the other. Each user in each system has his own job queue. The tasks are run in the correct context (that of enqueue) from any shell/process, and its output/results can be easily watched. It is very useful when you know that your commands depend on a lot of RAM, a lot of disk use, give a lot of output, or for whatever reason it's better not to run them at the same time. ts-0.7.4/info.c0000600000175000017500000000556212240114503012251 0ustar viricviric/* Task Spooler - a task queue system for the unix user Copyright (C) 2007-2009 Lluís Batlle i Rossell Please find the license in the provided COPYING file. */ #include #include #include #include #include #include #include "main.h" void pinfo_init(struct Procinfo *p) { p->ptr = 0; p->nchars = 0; p->allocchars = 0; p->start_time.tv_sec = 0; p->start_time.tv_usec = 0; p->end_time.tv_sec = 0; p->end_time.tv_usec = 0; p->enqueue_time.tv_sec = 0; p->enqueue_time.tv_usec = 0; } void pinfo_free(struct Procinfo *p) { if (p->ptr) { free(p->ptr); } p->nchars = 0; p->allocchars = 0; } void pinfo_addinfo(struct Procinfo *p, int maxsize, const char *line, ...) { va_list ap; int newchars = p->nchars + maxsize; void *newptr; int res; va_start(ap, line); /* Ask for more memory for the array, if needed */ if (newchars > p->allocchars) { int newmem; int newalloc; newalloc = newchars; newmem = newchars * sizeof(*p->ptr); newptr = realloc(p->ptr, newmem); if(newptr == 0) { warning("Cannot realloc more memory (%i) in pinfo_addline. 
" "Not adding the content.", newmem); return; } p->ptr = (char *) newptr; p->allocchars = newalloc; } res = vsnprintf(p->ptr + p->nchars, (p->allocchars - p->nchars), line, ap); p->nchars += res; /* We don't store the final 0 */ } void pinfo_dump(const struct Procinfo *p, int fd) { if (p->ptr) { int res; int rest = p->nchars; while (rest > 0) { res = write(fd, p->ptr, rest); if (res == -1) { warning("Cannot write more chars in pinfo_dump"); return; } rest -= res; } } } int pinfo_size(const struct Procinfo *p) { return p->nchars; } void pinfo_set_enqueue_time(struct Procinfo *p) { gettimeofday(&p->enqueue_time, 0); p->start_time.tv_sec = 0; p->start_time.tv_usec = 0; p->end_time.tv_sec = 0; p->end_time.tv_usec = 0; } void pinfo_set_start_time(struct Procinfo *p) { gettimeofday(&p->start_time, 0); p->end_time.tv_sec = 0; p->end_time.tv_usec = 0; } void pinfo_set_end_time(struct Procinfo *p) { gettimeofday(&p->end_time, 0); } float pinfo_time_until_now(const struct Procinfo *p) { float t; struct timeval now; gettimeofday(&now, 0); t = now.tv_sec - p->start_time.tv_sec; t += (float) (now.tv_usec - p->start_time.tv_usec) / 1000000.; return t; } float pinfo_time_run(const struct Procinfo *p) { float t; t = p->end_time.tv_sec - p->start_time.tv_sec; t += (float) (p->end_time.tv_usec - p->start_time.tv_usec) / 1000000.; return t; } ts-0.7.4/Changelog0000600000175000017500000001343612240114503012763 0ustar viricviric## Features to be implemented Doubtful tasks: - Allow the 'only-tail' output, without any storage. The number of lines or bytes should be choosable. - No program should give that big amount of output, so it cannot be stored. - What happens if the output disk is full? Should be decide a good behaviour in that situation? - It's up to the running program; ts gives the descsriptor to it. Future: - Use a better system than mkstemp() for finding output files, so we can add .gz to the gzipped outputs. v0.7.4: - Fixing a bug about dangling processes, in case of using "ts -r". v0.7.3: - Add option '-N' to set the number of slots required for a job to run. Proposed by Sergey Litvinov. v0.7.2: - Add option '-E', to keep stderr apart. It goes to "`ts -o`.e". v0.7.1: - Implement check of ownership of the socket. Security bugfix. v0.7.0: - Implement blocking of enqueuing still allowing other queue operations. - Add the -B parameter, that will not block in case of queue full v0.6.6: - Fixed the feature TS_MAXCONN, which collided with the protocol version check. v0.6.5: - Fixed a problem that -c and -t, if their pipe was broken, they remained. - Fixed a problem (maybe some copypaste once?) on -l, that created always an error msg. - Adding the possibility of limiting the amount of ts connections. v0.6.4: - Fixed a bug breaking -c and -t. v0.6.3: - Fixed a bug on -c and -t. - Adding first support for -D (run depending on any job) - Adding version control on the protocol. - Making the ts server chdir to the socket directory, so it doesn't annoy on umounting. v0.6.2: - Fixed a bug on -w - Making -S return the number of slots - Fixed a bug on clients dying (making the queue not usable anymore) - Making an error file different for each socket ($TS_SOCKET.error) - Making ts not to log on the error log file constantly. - Adding information on the exit status in -i v0.6.1: - Adding support for the TS_SLOTS env variable, to set the number of slots of a starting server. - Removing references to /usr for nix. - Bugfixing (fixed error handling, -m) v0.6: - Adding multi-slot running capabilities. 
Big changes in the job management. - Bugfixing (-t, -c and -r, mostly) v0.5.4.1: - Moving out 'debian', so the Debian packagers can use their systems comfortably when packaging ts. v0.5.4: - Bug fixing (-t, actions to last job ids, -w, ...) - -c waits for the job end. Like -t, but it shows the whole output. v0.5.3: - Fixed: Only one waiting ts process ('-w' and '-t') could be used for each jobid. Now, many. v0.5.2: - Fixed the '-r' without jobid, which should remove the last added job. - Fixed some messages for '-t' without jobid (it said something about job -1). v0.5.1: - Fixed the new '-t', which had several problems v0.5: - Several bug fixes (related to -w, -r and -L) - ts creates sessions, so "kill -- -`ts -p`" can be used - New implementation for '-t', which exits when finished, similar to '-w' v0.4.2: - Added labels with -L, in order to distingish commands better on queue list. - Added per-job information, readable through -i, configurable through TS_ENV - Added dependant enqueuing (-d). Conditional run based on last job result. v0.4.1: -* Bugfix: removing a job now doesn't leave the job's ts client alive -* Bugfix: killing the running job's ts client now doesn't block the queue forever -* Bugfix: now using setsid() in order to unlink ts from the bash session. v0.4: -* Allow killing 'ts' in a nice way. - It's good, because a spawner of 'ts' may want to kill it. -* If the server is horribly killed (it may happen as this is software), store the task queue in a file, so it can be recovered. -* Store the 'times()' for the run tasks. v0.3.2: -* Fixed the $POSIXLY_CORRECT bug finally -* Slightly better makefile and man page -* The server should never run out of handles - simply block the client -* Added an error reporting system (in ts(1) - BUGS) -* SIGPIPE is ignored in the ts processes v0.3.1: -* Fixed a bug with $POSIXLY_CORRECT being passed to the jobs. This break the gentoo emerge jobs. -* Fixed a posix compatibility problem for BSD. -* Fixed - the fd 3 is no more opened for the task processes. -* Added a man page and fixed the makefile. v0.3: -* Allow sending the output by mail, or passing it to a program if an env var defines it. -* Allow gzipping the output -* Add a flag for swapping two jobs in the queue (-U id-id) -* Limit the jobs in 'finished' state according to $TS_MAXFINISHED -* Output the jobid in stdout. -* Status check with -s. Return the state in stdout. -* An env var may define the socket path. -* bash is no more used for running commands. Commands are run as is. v0.2.3: -* '-nf' doesn't leave the job in 'finished' state -* Fixed a bug in -r -* '-u' implemented. For altering the queue order. -* '-w' returns the waited errorlevel -* '-o' returns the output file of the last job v0.2.1: -* Fixed a bug in the wait-notifications. -* Improved the compilation, including almost all the proper files. v0.2: -* Add a '-h' for help -* Have a nice list output (correctly formatted) -* Allow killing the running job (Allowed with -p easily) -* Allow tailing any job -* Allow removing a job from the queue -* Allow waiting any job v0.1: -* The clients should _always_ go into background. * -* Allow to receive more parameters in the command line, and them be joined with spaces for the command passed to bash. -* There should be no limit on command line parameters. -* There should be no limit on lines outputed by "-l". -* Get the list of finished jobs, with its errorlevel. -* The list of finished jobs should be clenead on demmand "-c". -* Output to a file should be possible. Use mkstemp(). 
-* There should be no limit on output filename. v0.0.1: -* add tasks, and they run one after another. Input closed. Output in stdout. ts-0.7.4/setenv0000600000175000017500000000004412240114503012367 0ustar viricviricexport BUG_PROJECT=$PWD/buglist.bug ts-0.7.4/web/0000700000175000017500000000000012240114503011715 5ustar viricviricts-0.7.4/web/ts-0.2.1.png0000600000175000017500000001145412240114503013514 0ustar viricviric[binary PNG data omitted: screenshot of ts-0.2.1] Linux.com :: Queuing tasks for batch execution with Task Spooler

Queuing tasks for batch execution with Task Spooler

By Ben Martin on August 12, 2008 (9:00:00 AM)

The Task Spooler project allows you to queue up tasks from the shell for batch execution. Task Spooler is simple to use and requires no configuration. You can view and edit queued commands, and you can view the output of queued commands at any time.

Task Spooler has some similarities with other delayed and batch execution projects, such as "at." While both Task Spooler and at handle multiple queues and allow the execution of commands at a later point, the at project handles output from commands by emailing the results to the user who queued the command, while Task Spooler allows you to get at the results from the command line instead. Another major difference is that Task Spooler is not aimed at executing commands at a specific time, but rather at simply adding to and executing commands from queues.

The main repositories for Fedora, openSUSE, and Ubuntu do not contain packages for Task Spooler. There are packages for some versions of Debian, Ubuntu, and openSUSE 10.x available along with the source code on the project's homepage. In this article I'll use a 64-bit Fedora 9 machine and install version 0.6 of Task Spooler from source. Task Spooler does not use autotools to build, so to install it, simply run make; sudo make install. This will install the main Task Spooler command ts and its manual page into /usr/local.
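
If you prefer a prefix other than /usr/local, the project's README notes that the Makefile accepts a PREFIX variable; the exact invocation below is my sketch of that:

$ make
$ sudo make install PREFIX=/usr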

A simple interaction with Task Spooler is shown below. First I add a new job to the queue and check the status. As the command is a very simple one, it is likely to have been executed immediately. Executing ts by itself with no arguments shows the executing queue, including tasks that have completed. I then use ts -c to get at the stdout of the executed command. The -c option uses cat to display the output file for a task. Using ts -i shows you information about the job. To clear finished jobs from the queue, use the ts -C command, not shown in the example.

$ ts echo "hello world" 6 $ ts ID State Output E-Level Times(r/u/s) Command [run=0/1] 6 finished /tmp/ts-out.QoKfo9 0 0.00/0.00/0.00 echo hello world $ ts -c 6 hello world $ ts -i 6 Command: echo hello world Enqueue time: Tue Jul 22 14:42:22 2008 Start time: Tue Jul 22 14:42:22 2008 End time: Tue Jul 22 14:42:22 2008 Time run: 0.003336s

The -t option operates like tail -f, showing you the last few lines of output and continuing to show you any new output from the task. If you would like to be notified when a task has completed, you can use the -m option to have the results mailed to you, or you can queue another command to be executed that just performs the notification. For example, I might add a tar command and want to know when it has completed. The commands below will create a tarball and use libnotify commands to create an unobtrusive popup window on my desktop when the tarball creation is complete. The popup will be dismissed automatically after a timeout.

$ ts tar czvf /tmp/mytarball.tar.gz liberror-2.1.80011
11
$ ts notify-send "tarball creation" "the long running tar creation process is complete."
12
$ ts
ID   State      Output               E-Level  Times(r/u/s)   Command [run=0/1]
11   finished   /tmp/ts-out.O6epsS   0        4.64/4.31/0.29 tar czvf /tmp/mytarball.tar.gz liberror-2.1.80011
12   finished   /tmp/ts-out.4KbPSE   0        0.05/0.00/0.02 notify-send tarball creation the long... is complete.

Notice in the output above, toward the far right of the header line, the run=0/1 indicator. This tells you that Task Spooler is currently executing nothing and can run at most one task at a time. Task Spooler allows you to execute multiple tasks at once from your task queue to take advantage of multicore CPUs. The -S option lets you set how many tasks can be executed in parallel from the queue, as shown below.

$ ts -S 2
$ ts
ID   State      Output               E-Level  Times(r/u/s)   Command [run=0/2]
6    finished   /tmp/ts-out.QoKfo9   0        0.00/0.00/0.00 echo hello world

If you have two tasks that you want to execute with Task Spooler but one depends on the other having already been executed (and perhaps that the previous job has succeeded too) you can handle this by having one task wait for the other to complete before executing. This becomes more important on a quad core machine when you might have told Task Spooler that it can execute three tasks in parallel. The commands shown below create an explicit dependency, making sure that the second command is executed only if the first has completed successfully, even when the queue allows multiple tasks to be executed. The first command is queued normally using ts. I use a subshell to execute the commands by having ts explicitly start a new bash shell. The second command uses the -d option, which tells ts to execute the command only after the successful completion of the last command that was appended to the queue. When I first inspect the queue I can see that the first command (28) is executing. The second command is queued but has not been added to the list of executing tasks because Task Spooler is aware that it cannot execute until task 28 is complete. The second time I view the queue, both tasks have completed.

$ ts bash -c "sleep 10; echo hi"
28
$ ts -d echo there
29
$ ts
ID   State      Output               E-Level  Times(r/u/s)   Command [run=1/2]
28   running    /tmp/ts-out.hKqDva                           bash -c sleep 10; echo hi
29   queued     (file)                                       && echo there
$ ts
ID   State      Output               E-Level  Times(r/u/s)    Command [run=0/2]
28   finished   /tmp/ts-out.hKqDva   0        10.01/0.00/0.01 bash -c sleep 10; echo hi
29   finished   /tmp/ts-out.VDtVp7   0        0.00/0.00/0.00  && echo there
$ cat /tmp/ts-out.hKqDva
hi
$ cat /tmp/ts-out.VDtVp7
there

You can also explicitly set dependencies on other tasks as shown below. Because the ts command prints the ID of a new task to the console, the first command puts that ID into a shell variable for use in the second command. The second command passes the task ID of the first task to ts, telling it to wait for the task with that ID to complete before returning. Because this is joined with the command we wish to execute with the && operation, the second command will execute only if the first one has finished and succeeded.

The first time we view the queue you can see that both tasks are running. The first task will be in the sleep command that we used explicitly to slow down its execution. The second command will be executing ts, which will be waiting for the first task to complete. One downside of tracking dependencies this way is that the second command is added to the running queue even though it cannot do anything until the first task is complete.

$ FIRST_TASKID=`ts bash -c "sleep 10; echo hi"`
$ ts sh -c "ts -w $FIRST_TASKID && echo there"
25
$ ts
ID   State      Output               E-Level  Times(r/u/s)   Command [run=2/2]
24   running    /tmp/ts-out.La9Gmz                           bash -c sleep 10; echo hi
25   running    /tmp/ts-out.Zr2n5u                           sh -c ts -w 24 && echo there
$ ts
ID   State      Output               E-Level  Times(r/u/s)    Command [run=0/2]
24   finished   /tmp/ts-out.La9Gmz   0        10.01/0.00/0.00 bash -c sleep 10; echo hi
25   finished   /tmp/ts-out.Zr2n5u   0        9.47/0.00/0.01  sh -c ts -w 24 && echo there
$ ts -c 24
hi
$ ts -c 25
there

Wrap-up

Task Spooler allows you to convert a shell command to a queued command by simply prepending ts to the command line. One major advantage of using ts over something like the at command is that you can effectively run tail -f on the output of a running task and also get at the output of completed tasks from the command line. The utility's ability to execute multiple tasks in parallel is very handy if you are running on a multicore CPU. Because you can explicitly wait for a task, you can set up very complex interactions where you might have several tasks running at once and have jobs that depend on multiple other tasks to complete successfully before they can execute.

Because you can make explicitly dependent tasks take up slots in the actively running task queue, you can effectively delay the execution of the queue until a time of your choosing. For example, if you queue up a task that waits for a specific time before returning successfully and have a small group of other tasks that are dependent on this first task to complete, then no tasks in the queue will execute until the first task completes.
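
As a rough sketch of that idea (the script names are made up, and the date arithmetic assumes GNU date), a gate task can sleep until the chosen time while the real work waits on it with ts -w, just as in the earlier example:

$ GATE=`ts sh -c 'sleep $(( $(date -d "02:00 tomorrow" +%s) - $(date +%s) ))'`
$ ts sh -c "ts -w $GATE && ./nightly-backup.sh"
$ ts sh -c "ts -w $GATE && ./rotate-logs.sh"

Until the gate task returns at 02:00, the two wrappers occupy running slots waiting on ts -w, and the real commands start only if it completes successfully.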

Ben Martin has been working on filesystems for more than 10 years. He completed his Ph.D. and now offers consulting services focused on libferris, filesystems, and search solutions.

Comments on Queuing tasks for batch execution with Task Spooler

Queuing tasks for batch execution with Task Spooler

Posted by: Anonymous [ip: 208.255.219.36] on August 12, 2008 08:18 PM
Very nice tip! I was able to put it to use today on a Centos 5.2 box.


Queuing tasks for batch execution with Task Spooler

Posted by: Anonymous [ip: 130.221.224.7] on August 13, 2008 10:39 PM
Hey this is great and not to be too snarky, only about 30 years behind VMS batch queues! :D Will be using this on my OS X box for sure... thanks for the tip!


Queuing tasks for batch execution with Task Spooler

Posted by: Anonymous [ip: 24.131.41.49] on August 14, 2008 03:29 PM
ts - rocks. I'd been doing my batch queuing the hard way for 10 years with scripts, 'cron' and 'at'! I discovered 'ts' about a month ago. I love that you can limit the number of jobs to any number (like the number of cores your system has) OR have different queues based on FIFOs or environment variables OR ... the options are nearly endless.

Or just use the default settings let 'er rip with smart defaults.

Anyone with a PVR/MythTV box who wants to convert content from MPG2 to MPG4, but not overtax their system can use 'nice' and 'ts' to get there easily.


ts-0.7.4/web/ts-0.5.4.ebuild0000600000175000017500000000146112240114503014177 0ustar viricviric# Copyright 1999-2008 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 # $Header: $ inherit toolchain-funcs DESCRIPTION="TaskSpooler is a comfortable way of running batch jobs" HOMEPAGE="http://vicerveza.homeunix.net/~viric/soft/ts/" SRC_URI="http://vicerveza.homeunix.net/~viric/soft/ts/${P}.tar.gz" LICENSE="GPL-2" SLOT="0" KEYWORDS="~amd64 ~x86" IUSE="" DEPEND="" RDEPEND="" src_unpack() { unpack ${A} cd "${S}" sed -i \ -e 's|CFLAGS=|CFLAGS+=|' \ -e 's|-g -O0||' \ Makefile || die "sed failed" } src_compile() { emake CC=$(tc-getCC) || die "emake failed" } src_install() { exeinto /usr/bin doexe ts doman ts.1 dodoc Changelog OBJECTIVES PORTABILITY PROTOCOL README TRICKS } src_test() { PATH="${D}/usr/bin:${PATH}" ./testbench.sh || die "tests failed" } ts-0.7.4/web/index.html0000600000175000017500000001747012240114503013725 0ustar viricviric Task Spooler - batch is back!

Task Spooler

As in freshmeat.net:

task spooler is a Unix batch system where the spooled tasks run one after the other. The number of jobs to run at once can be set at any time. Each user in each system has his own job queue. The tasks are run in the correct context (that of enqueue) from any shell/process, and their output/results can be easily watched. It is very useful when you know that your commands depend on a lot of RAM, a lot of disk use, give a lot of output, or for whatever reason it's better not to run them all at the same time, while you want to keep your resources busy for maximum benefit. Its interface allows using it easily in scripts.
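
Because the job id is printed on stdout and -w returns the waited job's exit code, ts is easy to drive from a script; a small sketch (the file name is only an example):

$ id=$(ts gzip -9 /var/tmp/big.log)
$ ts -w "$id"
$ echo "job $id finished with status $?"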

For your first contact, you can read an article at linux.com, which I like as an overview, guide and set of examples (original url). For more advanced usage, don't neglect the TRICKS file in the package.

Features

I wrote Task Spooler because I didn't have any comfortable way of running batch jobs on my Linux computer. I wanted to:

  • Queue jobs from different terminals.
  • Use it locally in my machine (not as in network queues).
  • Have a good way of seeing the output of the processes (tail, errorlevels, ...).
  • Easy use: almost no configuration.
  • Easy to use in scripts.

In the end, after some time using and developing ts, it can do quite a bit more:

  • It works in GNU systems with the GNU C compiler (Linux, Darwin, Cygwin, FreeBSD, etc.).
  • No configuration at all for a simple queue.
  • Good integration with renice, kill, etc. (through `ts -p` and process groups).
  • Have any amount of queues identified by name, writing a simple wrapper script for each (I use ts2, tsio, tsprint, etc.); see the sketch after this list.
  • Control how many jobs may run at once in any queue (taking advantage of multicore CPUs).
  • It never removes the result files, so they can be reached even after we've lost the ts task list.
  • Transparent if used as a subprogram with -nf.
  • Optional separation of stdout and stderr.
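
A wrapper of the kind mentioned in the list above only has to point TS_SOCKET (see the help text further down) at a different socket; a minimal sketch, with an assumed socket path:

#!/bin/sh
# ts2: an independent queue, kept separate by its own socket file
export TS_SOCKET=/tmp/socket-ts2.$USER
exec ts "$@"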

You can look at an old (but representative) screenshot of ts-0.2.1 if you want.

Mailing list

I created a Google Group for the program. You can find the archive and how to join on the taskspooler Google Group page.

Alessandro Öhler once maintained a mailing list for discussing newer functionalities and interchanging use experiences. I think this doesn't work anymore, but you can look at the old archive or even try to subscribe.

How it works

The queue is maintained by a server process. This server process is started if it isn't there already. The communication goes through a unix socket usually in /tmp/.

When the user requests a job (using a ts client), the client waits for the server's message to know when it can start. When the server allows it to start, this client usually forks and runs the command with the proper environment, because it is the client that runs the job, not the server (unlike 'at' or 'cron'). So the ulimits, environment, pwd, etc. apply.
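
A quick way to see this (a toy example of mine; the job id shown is arbitrary) is to enqueue a job with a particular variable and working directory and then look at its output:

$ cd /tmp && FOO=hello ts sh -c 'echo "$FOO from $PWD"'
15
$ ts -c 15
hello from /tmp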

When the job finishes, the client notifies the server. At this time, the server may notify any waiting client, and stores the output and the errorlevel of the finished job.

Moreover, the client can take advantage of a lot of information from the server: when a job finishes, where the job output goes, etc.

Download

Download the latest version (GPLv2+ licensed): ts-0.7.2.tar.gz - v0.7.2 - Changelog

Look at the version repository if you are interested in its development.

Андрей Пантюхин (Andrew Pantyukhin) maintains the BSD port.

Alessandro Öhler provided a Gentoo ebuild for 0.4, which with simple changes I updated to the ebuild for 0.6.4. Moreover, the Gentoo Project Sunrise already has also an ebuild (maybe old) for ts.

Alexander V. Inyukhin maintains unofficial debian packages for several platforms.

Pascal Bleser packed the program for SuSE and openSuSE in RPMs for various platforms.

Manual

Look at its manpage (v0.6.1). Here you also have a copy of the help for the same version:

usage: ./ts [action] [-ngfmd] [-L <lab>] [cmd...]
Env vars:
  TS_SOCKET  the path to the unix socket used by the ts command.
  TS_MAILTO  where to mail the result (on -m). Local user by default.
  TS_MAXFINISHED  maximum finished jobs in the queue.
  TS_ONFINISH  binary called on job end (passes jobid, error, outfile, command).
  TS_ENV  command called on enqueue. Its output determines the job information.
  TS_SAVELIST  filename which will store the list, if the server dies.
  TS_SLOTS   amount of jobs which can run at once, read on server start.
Actions:
  -K       kill the task spooler server
  -C       clear the list of finished jobs
  -l       show the job list (default action)
  -S [num] set the number of max simultanious jobs of the server.
  -t [id]  tail -f the output of the job. Last run if not specified.
  -c [id]  cat the output of the job. Last run if not specified.
  -p [id]  show the pid of the job. Last run if not specified.
  -o [id]  show the output file. Of last job run, if not specified.
  -i [id]  show job information. Of last job run, if not specified.
  -s [id]  show the job state. Of the last added, if not specified.
  -r [id]  remove a job. The last added, if not specified.
  -w [id]  wait for a job. The last added, if not specified.
  -u [id]  put that job first. The last added, if not specified.
  -U <id-id>  swap two jobs in the queue.
  -h       show this help
  -V       show the program version
Options adding jobs:
  -n       don't store the output of the command.
  -g       gzip the stored output (if not -n).
  -f       don't fork into background.
  -m       send the output by e-mail (uses sendmail).
  -d       the job will be run only if the job before ends well
  -L <lab> name this task with a label, to be distinguished on listing.

Thanks

  • To Raúl Salinas, for his inspiring ideas
  • To Alessandro Öhler, the first non-acquaintance user, who proposed and created the mailing list.
  • Андрею Пантюхину, who created the BSD port.
  • To the useful, although sometimes uncomfortable, UNIX interface.
  • To Alexander V. Inyukhin, for the debian packages.
  • To Pascal Bleser, for the SuSE packages.
  • To Sergio Ballestrero, who sent code and motivated the development of a multislot version of ts.
  • To GNU, an ugly but working and helpful ol' UNIX implementation.
Author: Lluís Batlle i Rossell, viric_at_vicerveza_dot_homeunix_dot_net
ts-0.7.4/msg.c0000600000175000017500000000347012240114503012100 0ustar viricviric/* Task Spooler - a task queue system for the unix user Copyright (C) 2007-2009 Lluís Batlle i Rossell Please find the license in the provided COPYING file. */ #include #include #include #include #include #include "main.h" void send_bytes(const int fd, const char *data, int bytes) { int res; int offset = 0; while(1) { res = send(fd, data + offset, bytes, 0); if(res == -1) { warning("Sending %i bytes to %i.", bytes, fd); break; } if(res == bytes) break; offset += res; bytes -= res; } } int recv_bytes(const int fd, char *data, int bytes) { int res; int offset = 0; while(1) { res = recv(fd, data + offset, bytes, 0); if(res == -1) { warning("Receiving %i bytes from %i.", bytes, fd); break; } if(res == bytes) break; offset += res; bytes -= res; } return res; } void send_msg(const int fd, const struct msg *m) { int res; if (0) msgdump(stderr, m); res = send(fd, m, sizeof(*m), 0); if(res == -1 || res != sizeof(*m)) warning_msg(m, "Sending a message to %i, sent %i bytes, should " "send %i.", fd, res, sizeof(*m)); } int recv_msg(const int fd, struct msg *m) { int res; res = recv(fd, m, sizeof(*m), 0); if(res == -1) warning_msg(m, "Receiving a message from %i.", fd); if (res == sizeof(*m) && 0) msgdump(stderr, m); if (res != sizeof(*m) && res > 0) warning_msg(m, "Receiving a message from %i, received %i bytes, " "should have received %i.", fd, res, sizeof(*m)); return res; } ts-0.7.4/makedeb0000700000175000017500000000051012240114503012452 0ustar viricviric#!/bin/sh # This should create a debian package. First try. Never done before! version=0.5.4 make install PREFIX=deb/usr cd deb # data.tar.gz tar c --owner root --group root -v -z -f pkg/data.tar.gz usr # control.tar.gz find usr/ -type f | xargs md5sum > pkg/DEBIAN/md5sums dpkg -b pkg mv pkg.deb ts_${version}_i386.deb ts-0.7.4/PROTOCOL0000600000175000017500000000104512240114503012326 0ustar viricviric[ Totally outdated document ] New job ------------------------- Client: Msg [ new job (commandsize,filenamesize)] Client: Command (+null) Server: Msg [ NewJob OK ] --- pause until the server allows running --- Server: Msg [ RunJOB ] Client: Msg [ RunJOB OK ] Client: (if needed) Filename (+null) --- pause until the client process finishes --- Client: Msg [ EndJOB ] Client: close. List ------------------------- Client: Msg [ LIST ] Server: Msg [ List_line ] Server: line (+null) Server: Msg [ List_line ] Server: line (+null) ... Server: close. ts-0.7.4/list.c0000600000175000017500000001277312240114503012273 0ustar viricviric/* Task Spooler - a task queue system for the unix user Copyright (C) 2007-2009 Lluís Batlle i Rossell Please find the license in the provided COPYING file. */ #include #include #include #include #include "main.h" /* From jobs.c */ extern int busy_slots; extern int max_slots; char * joblistdump_headers() { char * line; line = malloc(600); snprintf(line, 600, "#!/bin/sh\n# - task spooler (ts) job dump\n" "# This file has been created because a SIGTERM killed\n" "# your queue server.\n" "# The finished commands are listed first.\n" "# The commands running or to be run are stored as you would\n" "# probably run them. 
Take care - some quotes may have got" " broken\n\n"); return line; } char * joblist_headers() { char * line; line = malloc(100); snprintf(line, 100, "%-4s %-10s %-20s %-8s %-14s %s [run=%i/%i]\n", "ID", "State", "Output", "E-Level", "Times(r/u/s)", "Command", busy_slots, max_slots); return line; } static int max(int a, int b) { if (a > b) return a; return b; } static const char * ofilename_shown(const struct Job *p) { const char * output_filename; if (p->state == SKIPPED) { output_filename = "(no output)"; } else if (p->store_output) { if (p->state == QUEUED) { output_filename = "(file)"; } else { if (p->output_filename == 0) /* This may happen due to concurrency * problems */ output_filename = "(...)"; else output_filename = p->output_filename; } } else output_filename = "stdout"; return output_filename; } static char * print_noresult(const struct Job *p) { const char * jobstate; const char * output_filename; int maxlen; char * line; /* 18 chars should suffice for a string like "[int]&& " */ char dependstr[18] = ""; jobstate = jstate2string(p->state); output_filename = ofilename_shown(p); maxlen = 4 + 1 + 10 + 1 + max(20, strlen(output_filename)) + 1 + 8 + 1 + 14 + 1 + strlen(p->command) + 20; /* 20 is the margin for errors */ if (p->label) maxlen += 3 + strlen(p->label); if (p->do_depend) { maxlen += sizeof(dependstr); if (p->depend_on == -1) snprintf(dependstr, sizeof(dependstr), "&& "); else snprintf(dependstr, sizeof(dependstr), "[%i]&& ", p->depend_on); } line = (char *) malloc(maxlen); if (line == NULL) error("Malloc for %i failed.\n", maxlen); if (p->label) snprintf(line, maxlen, "%-4i %-10s %-20s %-8s %14s %s[%s]%s\n", p->jobid, jobstate, output_filename, "", "", dependstr, p->label, p->command); else snprintf(line, maxlen, "%-4i %-10s %-20s %-8s %14s %s%s\n", p->jobid, jobstate, output_filename, "", "", dependstr, p->command); return line; } static char * print_result(const struct Job *p) { const char * jobstate; int maxlen; char * line; const char * output_filename; /* 18 chars should suffice for a string like "[int]&& " */ char dependstr[18] = ""; jobstate = jstate2string(p->state); output_filename = ofilename_shown(p); maxlen = 4 + 1 + 10 + 1 + max(20, strlen(output_filename)) + 1 + 8 + 1 + 14 + 1 + strlen(p->command) + 20; /* 20 is the margin for errors */ if (p->label) maxlen += 3 + strlen(p->label); if (p->do_depend) { maxlen += sizeof(dependstr); if (p->depend_on == -1) snprintf(dependstr, sizeof(dependstr), "&& "); else snprintf(dependstr, sizeof(dependstr), "[%i]&& ", p->depend_on); } line = (char *) malloc(maxlen); if (line == NULL) error("Malloc for %i failed.\n", maxlen); if (p->label) snprintf(line, maxlen, "%-4i %-10s %-20s %-8i %0.2f/%0.2f/%0.2f %s[%s]" "%s\n", p->jobid, jobstate, output_filename, p->result.errorlevel, p->result.real_ms, p->result.user_ms, p->result.system_ms, dependstr, p->label, p->command); else snprintf(line, maxlen, "%-4i %-10s %-20s %-8i %0.2f/%0.2f/%0.2f %s%s\n", p->jobid, jobstate, output_filename, p->result.errorlevel, p->result.real_ms, p->result.user_ms, p->result.system_ms, dependstr, p->command); return line; } char * joblist_line(const struct Job *p) { char * line; if (p->state == FINISHED) line = print_result(p); else line = print_noresult(p); return line; } char * joblistdump_torun(const struct Job *p) { int maxlen; char * line; maxlen = 10 + strlen(p->command) + 20; /* 20 is the margin for errors */ line = (char *) malloc(maxlen); if (line == NULL) error("Malloc for %i failed.\n", maxlen); snprintf(line, maxlen, "ts %s\n", 
p->command); return line; } ts-0.7.4/client.c0000600000175000017500000003626512240114503012600 0ustar viricviric/* Task Spooler - a task queue system for the unix user Copyright (C) 2007-2009 Lluís Batlle i Rossell Please find the license in the provided COPYING file. */ #include #include #include #include #include #include #include #include "main.h" static void c_end_of_job(const struct Result *res); static void c_wait_job_send(); static void c_wait_running_job_send(); char *build_command_string() { int size; int i; int num; char **array; char *commandstring; size = 0; num = command_line.command.num; array = command_line.command.array; /* Count bytes needed */ for (i = 0; i < num; ++i) { /* The '1' is for spaces, and at the last i, * for the null character */ size = size + strlen(array[i]) + 1; } /* Alloc */ commandstring = (char *) malloc(size); if(commandstring == NULL) error("Error in malloc for commandstring"); /* Build the command */ strcpy(commandstring, array[0]); for (i = 1; i < num; ++i) { strcat(commandstring, " "); strcat(commandstring, array[i]); } return commandstring; } void c_new_job() { struct msg m; char *new_command; char *myenv; m.type = NEWJOB; new_command = build_command_string(); myenv = get_environment(); /* global */ m.u.newjob.command_size = strlen(new_command) + 1; /* add null */ if (myenv) m.u.newjob.env_size = strlen(myenv) + 1; /* add null */ else m.u.newjob.env_size = 0; if (command_line.label) m.u.newjob.label_size = strlen(command_line.label) + 1; /* add null */ else m.u.newjob.label_size = 0; m.u.newjob.store_output = command_line.store_output; m.u.newjob.do_depend = command_line.do_depend; m.u.newjob.depend_on = command_line.depend_on; m.u.newjob.should_keep_finished = command_line.should_keep_finished; m.u.newjob.command_size = strlen(new_command) + 1; /* add null */ m.u.newjob.wait_enqueuing = command_line.wait_enqueuing; m.u.newjob.num_slots = command_line.num_slots; /* Send the message */ send_msg(server_socket, &m); /* Send the command */ send_bytes(server_socket, new_command, m.u.newjob.command_size); /* Send the label */ send_bytes(server_socket, command_line.label, m.u.newjob.label_size); /* Send the environment */ send_bytes(server_socket, myenv, m.u.newjob.env_size); free(new_command); free(myenv); } int c_wait_newjob_ok() { struct msg m; int res; res = recv_msg(server_socket, &m); if(res == -1) error("Error in wait_newjob_ok"); if(m.type == NEWJOB_NOK) { fprintf(stderr, "Error, queue full\n"); exit(EXITCODE_QUEUE_FULL); } if(m.type != NEWJOB_OK) error("Error getting the newjob_ok"); return m.u.jobid; } int c_wait_server_commands() { struct msg m; int res; while (1) { res = recv_msg(server_socket, &m); if(res == -1) error("Error in wait_server_commands"); if (res == 0) break; if(res != sizeof(m)) error("Error in wait_server_commands"); if (m.type == RUNJOB) { struct Result res; res.skipped = 0; /* These will send RUNJOB_OK */ if (command_line.do_depend && m.u.last_errorlevel != 0) { res.errorlevel = -1; res.user_ms = 0.; res.system_ms = 0.; res.real_ms = 0.; res.skipped = 1; c_send_runjob_ok(0, -1); } else run_job(&res); c_end_of_job(&res); return res.errorlevel; } } return -1; } void c_wait_server_lines() { struct msg m; int res; while (1) { res = recv_msg(server_socket, &m); if(res == -1) error("Error in wait_server_lines"); if (res == 0) break; if(res != sizeof(m)) error("Error in wait_server_lines 2"); if (m.type == LIST_LINE) { char * buffer; buffer = (char *) malloc(m.u.size); recv_bytes(server_socket, buffer, m.u.size); printf("%s", 
buffer); free(buffer); } } } void c_list_jobs() { struct msg m; m.type = LIST; send_msg(server_socket, &m); } /* Exits if wrong */ void c_check_version() { struct msg m; int res; m.type = GET_VERSION; /* Double send, so an old ts will answer for sure at least once */ send_msg(server_socket, &m); send_msg(server_socket, &m); /* Set up a 2 second timeout to receive the version msg. */ res = recv_msg(server_socket, &m); if(res == -1) error("Error calling recv_msg in c_check_version"); if (m.type != VERSION || m.u.version != PROTOCOL_VERSION) { printf("Wrong server version. Received %i, expecting %i\n", m.u.version, PROTOCOL_VERSION); error("Wrong server version. Received %i, expecting %i", m.u.version, PROTOCOL_VERSION); } /* Receive also the 2nd send_msg if we got the right version */ res = recv_msg(server_socket, &m); if(res == -1) error("Error calling the 2nd recv_msg in c_check_version"); } void c_show_info() { struct msg m; int res; m.type = INFO; m.u.jobid = command_line.jobid; send_msg(server_socket, &m); while (1) { res = recv_msg(server_socket, &m); if(res == -1) error("Error in wait_server_lines"); if (res == 0) break; if(res != sizeof(m)) error("Error in wait_server_lines 2"); if (m.type == INFO_DATA) { char * buffer; enum { DSIZE = 1000 }; /* We're going to output data using the stdout fd */ fflush(stdout); buffer = (char *) malloc(DSIZE); do { res = recv(server_socket, buffer, DSIZE, 0); if (res > 0) write(1, buffer, res); } while(res > 0); free(buffer); } } } void c_send_runjob_ok(const char *ofname, int pid) { struct msg m; /* Prepare the message */ m.type = RUNJOB_OK; if (ofname) /* ofname == 0, skipped execution */ m.u.output.store_output = command_line.store_output; else m.u.output.store_output = 0; m.u.output.pid = pid; if (m.u.output.store_output) m.u.output.ofilename_size = strlen(ofname) + 1; else m.u.output.ofilename_size = 0; send_msg(server_socket, &m); /* Send the filename */ if (command_line.store_output) send_bytes(server_socket, ofname, m.u.output.ofilename_size); } static void c_end_of_job(const struct Result *res) { struct msg m; m.type = ENDJOB; m.u.result = *res; /* struct copy */ send_msg(server_socket, &m); } void c_shutdown_server() { struct msg m; m.type = KILL_SERVER; send_msg(server_socket, &m); } void c_clear_finished() { struct msg m; m.type = CLEAR_FINISHED; send_msg(server_socket, &m); } static char * get_output_file(int *pid) { struct msg m; int res; char *string = 0; /* Send the request */ m.type = ASK_OUTPUT; m.u.jobid = command_line.jobid; send_msg(server_socket, &m); /* Receive the answer */ res = recv_msg(server_socket, &m); if(res != sizeof(m)) error("Error in get_output_file"); switch(m.type) { case ANSWER_OUTPUT: if (m.u.output.store_output) { /* Receive the output file name */ string = 0; if (m.u.output.ofilename_size > 0) { string = (char *) malloc(m.u.output.ofilename_size); recv_bytes(server_socket, string, m.u.output.ofilename_size); } *pid = m.u.output.pid; return string; } *pid = m.u.output.pid; return 0; /* WILL NOT GO FURTHER */ case LIST_LINE: /* Only ONE line accepted */ string = (char *) malloc(m.u.size); res = recv_bytes(server_socket, string, m.u.size); if(res != m.u.size) error("Error in get_output_file line size"); fprintf(stderr, "Error in the request: %s", string); exit(-1); /* WILL NOT GO FURTHER */ default: warning("Wrong internal message in get_output_file line size"); } /* This will never be reached */ return 0; } int c_tail() { char *str; int pid; str = get_output_file(&pid); if (str == 0) { fprintf(stderr, "The output 
is not stored. Cannot tail.\n"); exit(-1); } c_wait_running_job_send(); return tail_file(str, 10 /* Last lines to show */); } int c_cat() { char *str; int pid; str = get_output_file(&pid); if (str == 0) { fprintf(stderr, "The output is not stored. Cannot cat.\n"); exit(-1); } c_wait_running_job_send(); return tail_file(str, -1 /* All the lines */); } void c_show_output_file() { char *str; int pid; /* This will exit if there is any error */ str = get_output_file(&pid); if (str == 0) { fprintf(stderr, "The output is not stored.\n"); exit(-1); } printf("%s\n", str); free(str); } void c_show_pid() { int pid; /* This will exit if there is any error */ get_output_file(&pid); printf("%i\n", pid); } void c_remove_job() { struct msg m; int res; char *string = 0; /* Send the request */ m.type = REMOVEJOB; m.u.jobid = command_line.jobid; send_msg(server_socket, &m); /* Receive the answer */ res = recv_msg(server_socket, &m); if(res != sizeof(m)) error("Error in remove_job"); switch(m.type) { case REMOVEJOB_OK: return; /* WILL NOT GO FURTHER */ case LIST_LINE: /* Only ONE line accepted */ string = (char *) malloc(m.u.size); res = recv_bytes(server_socket, string, m.u.size); fprintf(stderr, "Error in the request: %s", string); exit(-1); /* WILL NOT GO FURTHER */ default: warning("Wrong internal message in remove_job"); } /* This will never be reached */ } int c_wait_job_recv() { struct msg m; int res; char *string = 0; /* Receive the answer */ res = recv_msg(server_socket, &m); if(res != sizeof(m)) error("Error in wait_job"); switch(m.type) { case WAITJOB_OK: return m.u.result.errorlevel; /* WILL NOT GO FURTHER */ case LIST_LINE: /* Only ONE line accepted */ string = (char *) malloc(m.u.size); res = recv_bytes(server_socket, string, m.u.size); if(res != m.u.size) error("Error in wait_job - line size"); fprintf(stderr, "Error in the request: %s", string); exit(-1); /* WILL NOT GO FURTHER */ default: warning("Wrong internal message in c_wait_job"); } /* This will never be reached */ return -1; } static void c_wait_job_send() { struct msg m; /* Send the request */ m.type = WAITJOB; m.u.jobid = command_line.jobid; send_msg(server_socket, &m); } static void c_wait_running_job_send() { struct msg m; /* Send the request */ m.type = WAIT_RUNNING_JOB; m.u.jobid = command_line.jobid; send_msg(server_socket, &m); } /* Returns the errorlevel */ int c_wait_job() { c_wait_job_send(); return c_wait_job_recv(); } /* Returns the errorlevel */ int c_wait_running_job() { c_wait_running_job_send(); return c_wait_job_recv(); } void c_send_max_slots(int max_slots) { struct msg m; /* Send the request */ m.type = SET_MAX_SLOTS; m.u.max_slots = command_line.max_slots; send_msg(server_socket, &m); } void c_get_max_slots() { struct msg m; int res; /* Send the request */ m.type = GET_MAX_SLOTS; m.u.max_slots = command_line.max_slots; send_msg(server_socket, &m); /* Receive the answer */ res = recv_msg(server_socket, &m); if(res != sizeof(m)) error("Error in move_urgent"); switch(m.type) { case GET_MAX_SLOTS_OK: printf("%i\n", m.u.max_slots); return; default: warning("Wrong internal message in get_max_slots"); } } void c_move_urgent() { struct msg m; int res; char *string = 0; /* Send the request */ m.type = URGENT; m.u.jobid = command_line.jobid; send_msg(server_socket, &m); /* Receive the answer */ res = recv_msg(server_socket, &m); if(res != sizeof(m)) error("Error in move_urgent"); switch(m.type) { case URGENT_OK: return; /* WILL NOT GO FURTHER */ case LIST_LINE: /* Only ONE line accepted */ string = (char *) malloc(m.u.size); 
res = recv_bytes(server_socket, string, m.u.size); if(res != m.u.size) error("Error in move_urgent - line size"); fprintf(stderr, "Error in the request: %s", string); exit(-1); /* WILL NOT GO FURTHER */ default: warning("Wrong internal message in move_urgent"); } /* This will never be reached */ return; } void c_get_state() { struct msg m; int res; char *string = 0; /* Send the request */ m.type = GET_STATE; m.u.jobid = command_line.jobid; send_msg(server_socket, &m); /* Receive the answer */ res = recv_msg(server_socket, &m); if(res != sizeof(m)) error("Error in get_state - line size"); switch(m.type) { case ANSWER_STATE: printf("%s\n", jstate2string(m.u.state)); return; /* WILL NOT GO FURTHER */ case LIST_LINE: /* Only ONE line accepted */ string = (char *) malloc(m.u.size); res = recv_bytes(server_socket, string, m.u.size); if(res != m.u.size) error("Error in get_state - line size"); fprintf(stderr, "Error in the request: %s", string); exit(-1); /* WILL NOT GO FURTHER */ default: warning("Wrong internal message in get_state"); } /* This will never be reached */ return; } void c_swap_jobs() { struct msg m; int res; char *string = 0; /* Send the request */ m.type = SWAP_JOBS; m.u.swap.jobid1 = command_line.jobid; m.u.swap.jobid2 = command_line.jobid2; send_msg(server_socket, &m); /* Receive the answer */ res = recv_msg(server_socket, &m); if(res != sizeof(m)) error("Error in swap_jobs"); switch(m.type) { case SWAP_JOBS_OK: return; /* WILL NOT GO FURTHER */ case LIST_LINE: /* Only ONE line accepted */ string = (char *) malloc(m.u.size); res = recv_bytes(server_socket, string, m.u.size); if(res != m.u.size) error("Error in swap_jobs - line size"); fprintf(stderr, "Error in the request: %s", string); exit(-1); /* WILL NOT GO FURTHER */ default: warning("Wrong internal message in swap_jobs"); } /* This will never be reached */ return; } ts-0.7.4/ttail.c0000600000175000017500000000025712240114503012427 0ustar viricviric#include /* Dep de main.h */ #include /* Dep de main.h */ #include "main.h" int main(int argc, char **argv) { tail_file(argv[1]); return 0; } ts-0.7.4/ts.10000600000175000017500000002362212240114503011657 0ustar viricviric.\" Copyright Lluís Batlle i Rossell .\" .\" This file may be copied under the conditions described .\" in the LDP GENERAL PUBLIC LICENSE, Version 1, September 1998 .\" that should have been distributed together with this file. .\" .\" Note: I took the gnu 'ls' man page as an example. .TH TS 1 2012-05 "Task Spooler 0.7.4" .SH NAME ts \- task spooler. A simple unix batch system .SH SYNOPSIS .BI "ts [" actions "] [" options "] [" command... ] .sp Actions: .BI "[\-KClhV] .BI "[\-t ["id ]] .BI "[\-c ["id ]] .BI "[\-p ["id ]] .BI "[\-o ["id ]] .BI "[\-s ["id ]] .BI "[\-r ["id ]] .BI "[\-w ["id ]] .BI "[\-u ["id ]] .BI "[\-i ["id ]] .BI "[\-U <"id - id >] .BI "[\-S ["num ]] .sp Options: .BI "[\-nfgmd]" .BI "[\-L <"label >] .BI "[\-D <"id >] .SH DESCRIPTION .B ts will run by default a per user unix task queue. The user can add commands to the queue, watch that queue at any moment, and look at the task results (actually, standard output and exit error). .SH SIMPLE USE Calling .B ts with a command will add that command to the queue, and calling it without commands or parameters will show the task list. .SH COMMAND OPTIONS When adding a job to ts, we can specify how it will be run and how will the results be collected: .TP .B "\-n" Do not store the standard output/error in a file at .B $TMPDIR - let it use the file descriptors decided by the calling process. 
If it is not used, the .B jobid for the new task will be outputed to stdout. .TP .B "\-g" Pass the output through gzip (only if .B \-n ). Note that the output files will not have a .gz extension. .TP .B "\-f" Don not put the task into background. Wait the queue and the command run without getting detached of the terminal. The exit code will be that of the command, and if used together with \-n, no result will be stored in the queue. .TP .B "\-m" Mail the results of the command (output and exit code) to .B $TS_MAILTO , or to the .B $USER using .B /usr/sbin/sendmail. Look at .B ENVIRONMENT. .TP .B "\-L